diff --git a/README.md b/README.md index d65af11..2d6d9d7 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ To help users quickly understand and navigate through the CTAM repository, the f ``` Before you begin, ensure you have met the following requirements: - - Python 3.9 or higher is installed + - Python 3.10 or higher is installed - Python virtualenv and pip is installed - Install some key libraries: sudo apt-get install python3-tk sshpass jq @@ -62,9 +62,11 @@ Before you begin, ensure you have met the following requirements: 1. Sample workspace files are present inside `json_spec` directory. Modify these file as per your infra details. -2. `input` dir inside `json_spec` contains sample input file that ctam require to run +2. `input` dir inside `json_spec` is organized by `spec_version` folders (e.g., spec_1.0, spec_1.1). +Test case specific JSON files (e.g., FirmwareInventory.json, UpdateService.json) required for executing CTAM test cases are also contained in this folder. +Within each `spec_version` folder, a `workspace` folder is provided containing sample input files. -3. Create a workspace directory and copy the configuration files from `json_spec/input` directory into `workspace` dir. +3. Create a workspace directory and copy the configuration files from `json_spec/input/spec_/workspace` directory into `workspace` dir. - `.netrc` - contains bmc ipaddress, username and password - `dut_config.json` - contains various params for running the test cases @@ -86,10 +88,11 @@ Before you begin, ensure you have met the following requirements: | `-d` or `--Discovery` | | Path to workspace directory that contains test run files | `-l` or `--list` | string | List all test cases. If combined with -G then list all cases of the chosen group | `-v` or `--version` | | Lists the current version +|`--spec`| | | Specify the version to run the test case with +|`--test_help`| |boolean| Shows detailed help for a specific test case and exit +|`-c` or `--consolidate`| | | Shows consolidated test results from multiple test runs into a single final report. - -### 💻 Running the tool locally - +### Setup 1. Optional: create python [virtual environment](https://docs.python.org/3/library/venv.html) and activate. ``` python -m venv venv @@ -105,43 +108,62 @@ Before you begin, ensure you have met the following requirements: `````` Open `docs/build/html/index.html` for full documentation including architecture and test case details -4. To run suite, +### 💻 Running the tool locally +1. To run suite, ``` cd ctam python ctam.py -w ..\example_workspace ``` - Logs will be created under `example_workspace\TestRuns` -5. To list all test cases + +2. To list all test cases ``` cd ctam python ctam.py -l ``` -6. To run a specific test case +3. To run a specific test case ``` cd ctam python ctam.py -w ..\example_workspace -t ``` - Logs will be created under `example_workspace\TestRuns` -7. To run test cases of a specifc test group + +4. To run test cases of a specifc test group ``` cd ctam python ctam.py -w ..\example_workspace -g ``` - Logs will be created under `example_workspace\TestRuns` -8. To run test cases with sequence +5. To run test cases with sequence ``` cd ctam python ctam.py -w ..\example_workspace -test_seq ``` - Logs will be created under `example_workspace\TestRuns` -9. To run groups with sequence + +6. To run groups with sequence ``` cd ctam python ctam.py -w ..\example_workspace -group_seq ``` - Logs will be created under `example_workspace\TestRuns` -10. 
Choose test cases to run by using tags and specifying the tags to include/exclude in test_runner.json
-11. Choose test sequence in test_runner.json if you want to run it from test runner config.
+
+7. To get the full detailed help for a specific test case
+ ```
+ cd ctam
+ python ctam.py -w ..\example_workspace -t --test_help
+ ```
+
+8. To get consolidated test results from multiple test runs into a single final report
+(See: [Why Consolidation is Needed](#why-consolidation-is-needed))
+ ```
+ cd ctam
+ python ctam.py -w ..\example_workspace -c ...
+ ```
+9. Choose test cases to run by using tags and specifying the tags to include/exclude in test_runner.json
+
+10. Choose test sequence in test_runner.json if you want to run it from test runner config.
+
+11. All test executions can be run with or without specifying a spec version.
+See: [Behavior Based on Input Source](#behavior-based-on-input-source)
+
+> **Spec Version Selection:**
+> Refer to **[Spec Version Handling](#spec-version-handling)** for details on how versions are resolved during test execution.
### Sphinx-Documentation
@@ -176,7 +198,8 @@ To create documentation for CTAM using Sphinx, follow these steps:
`make build_image`
-2. You can run the binary the same way running the python file. Just that now python file replaced by binary executable. Sample command to list all test cases.
+2. You can run the binary the same way as the python file; the python file is simply replaced by the binary executable. Sample command to list all test cases.
+ Note: Please move your workspace directory inside the `dist` directory before running the binary.
`cd dist && ./ctam -l`
@@ -187,12 +210,20 @@ To create documentation for CTAM using Sphinx, follow these steps:
## 📑 Log Files created
+Logs will be created under `example_workspace\TestRuns`
1. OCPTV Log file - All logs in OCPTV defined logging format.
2. Test_Score_<>.json - All test cases result + Final score.
3. Test_Report_<>.log - Tabulated report of test run
4. Test_Info_<>.json - Optional log file used by test interfaces (for debug)
5. RedfishCommandDetails/RedfishCommandDetails__ _<>.json - Redfish Commands used & return values (for debug)
+6. RedfishInteropValidator/_ /ConfigFile_<>.ini - Configuration file auto-generated for Redfish Interop Validator run.
+7. RedfishInteropValidator/_ /InteropHtmlLog_<>.html - HTML formatted output log from Redfish Interop Validator for detailed review.
+8. RedfishInteropValidator/_ /InteropLog_<>.txt - Text log output from Redfish Interop Validator containing command traces and results.
+9. TestReport_consolidated_<>.log - Tabulated consolidated report combining results from multiple test runs.
+10. TestScore_consolidated_<>.json - Merged list of test results from multiple independent test runs, presented in a unified JSON structure.
+11. TestScore_Summary_<>.json - High-level summary of results, grouped by domain and compliance level, showing final outcome and scoring overview.
+
## 🕹️ Test Runner Knobs
Test runner knobs can be modified in `test_runner.json` to enable different logging mode.
@@ -215,8 +246,8 @@
- We can assign different tags to different test cases.
- If we run according to test case tag, then all the test cases which assigned with that tag would run irrespective of group tags.
-**Note: - Tags = Group Tags Union Test Case Tags -group tags = ["G1"] and test case tags = ["L1"], so the final tags will be ["G1", "L1"]** + **Note: - Tags = Group Tags Union Test Case Tags + group tags = ["G1"] and test case tags = ["L1"], so the final tags will be ["G1", "L1"]** ## 🔀 Local Port Forwarding @@ -233,7 +264,86 @@ group tags = ["G1"] and test case tags = ["L1"], so the final tags will be ["G1" * SSHTunnelRemotePort (Use 443 as remote port address for redfish tunneling) * The `sshtunnel` library in Python is a handy tool for creating SSH tunnels, allowing you to programmatically set up and manage SSH port forwarding. It can be used to establish both local and remote port forwarding - + +## Why Consolidation is Needed + +In many testing environments, it may not be possible or practical to execute all test cases in a single continuous run. This can happen due to: + +- System resource limitations +- Hardware availability and scheduling constraints +- Incremental feature enablement across firmware versions +- The need to validate different functional components independently + +As a result, test cases may be executed in multiple independent runs, each generating its own output report. + +The **consolidation feature** enables merging the results of these separate test runs into one unified final report. This provides: + +1. A combined and comprehensive view of overall system behavior +2. A single summary score and pass/fail result +3. Consistent reporting even when execution is distributed across multiple sessions + +This ensures that analysis, scoring, and final compliance evaluation remain **complete, consistent, and streamlined**, regardless of how the tests were executed. + +## Spec Version Handling + +This framework supports execution of test cases against multiple specification versions. A specification version may be provided explicitly by the user or resolved automatically based on configuration. If no version is specified, the framework defaults to the latest available supported version. + +### Version Resolution Priority + +The version used for test execution is determined in the following order of precedence: + +| Priority (Highest to Lowest) | Source | Description | +|-----------------------------|----------------------------------|----------------------------------------------------------------------------------------| +| 1 (Highest) | Command-Line Argument (`--spec`) | Explicit version supplied during test invocation overrides all other sources. | +| 2 | `test_runner.json` Configuration | Applied only when no command-line version is provided. | +| 3 (Default) | Latest Available Version | Used when neither CLI nor configuration specifies a version. | + +### Version Compatibility Validation + +Each test case may optionally define a `spec_versions` attribute to indicate supported specification versions. This attribute may take one of the following forms: + +1. **Relational Expression** + Examples: `>= 1.0`, `< 1.1`, `== 1.0`, `> 1.0`, `<= 1.1` +2. **Explicit List of Versions** + Example: `["1.1", "1.0"]` +3. **None** + Indicates the test case is compatible with all specification versions. + +### Validation Logic + +When a version is selected (via CLI or configuration): + +- The selected version is validated against the `spec_versions` attribute of each test case. +- If the version satisfies the constraint defined by `spec_versions`, the test case is executed. +- If the version does not satisfy the constraint, the test case is **skipped**. 
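+
+As an illustration of this logic, below is a minimal sketch of how a `spec_versions` constraint can be checked against a selected version using the `packaging` library (already used by the framework); the helper name `satisfies` is hypothetical and not part of CTAM's API:
+
+```python
+from packaging.version import Version
+import operator
+
+# Hedged sketch of the validation described above, not the framework's actual code.
+_OPS = {">=": operator.ge, "<=": operator.le, ">": operator.gt, "<": operator.lt, "==": operator.eq}
+
+def satisfies(selected_version, spec_versions):
+    if spec_versions is None:                      # None -> compatible with every spec version
+        return True
+    if isinstance(spec_versions, list):            # explicit list, e.g. ["1.1", "1.0"]
+        return Version(selected_version) in [Version(v) for v in spec_versions]
+    sign, bound = spec_versions.split()            # relational expression, e.g. ">= 1.0"
+    return _OPS[sign](Version(selected_version), Version(bound))
+
+print(satisfies("1.1", ">= 1.0"))   # True  -> test case runs
+print(satisfies("1.0", "< 1.0"))    # False -> test case is skipped
+```
+
+A `None` constraint accepts every version, mirroring form 3 above.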
+ +### Behavior Based on Input Source + +1. **No Version Specified** + The framework automatically selects the latest available supported version. + +2. **Version Specified via Command-Line Argument (`--spec`)** + The version supplied via CLI takes highest precedence. It is validated against `spec_versions`. + - If valid → test runs + - If invalid → test is skipped + +3. **Version Specified in `test_runner.json`** + Used only when no CLI version is provided, validated the same way. + +#### Command-Line Example +``` +cd ctam +python ctam.py -w ..\example_workspace -t --spec +``` + +### Summary + +This implementation provides: + +- Controlled and deterministic version selection +- Flexible compatibility via relational or list-based version constraints +- Clear and predictable execution behavior + ## 📖 Developer notes ### VS Code diff --git a/ctam/ctam.py b/ctam/ctam.py index f8ec777..99c74cb 100644 --- a/ctam/ctam.py +++ b/ctam/ctam.py @@ -11,6 +11,7 @@ import json from pathlib import Path from datetime import datetime +import importlib # until the folder structure gets fixed to a more pip/setuptools oriented format # we need to manually adjust the path so that running the main script's imports work sys.path.append(str(Path(__file__).resolve().parent)) @@ -87,6 +88,18 @@ def parse_args(): help="Path to the previous test report for consolidation", nargs="+", ) + parser.add_argument( + "--spec", + required=False, + help="The spec version to run the test with" + ) + + parser.add_argument( + "--test_help", + action="store_true", + help="Show detailed help for a specific test case and exit" + ) + return parser.parse_args() def get_exception_details(exec: Exception = ""): @@ -151,8 +164,14 @@ def main(): raw_log_file = os.path.join(logs_output_dir, "Command_Line_Logs.log") redirect_output = RedirectOutput(raw_log_file) redirect_output.start() - default_config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "json_spec", "input") - default_config_path = default_config_path.replace('/tmp/', '') if default_config_path.startswith('/tmp/') else default_config_path + + spec_version = args.spec + default_config_path = None + + if spec_version: + default_config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "json_spec", "input", f"spec_{spec_version}") + default_config_path = default_config_path.replace('/tmp/', '') if default_config_path.startswith('/tmp/') else default_config_path + if not args.workspace: ifc_dir = os.path.join(os.path.dirname(__file__), "interfaces") ext_test_root_dir = os.path.join(os.path.dirname(__file__), "tests") @@ -179,6 +198,7 @@ def main(): return 1, None, "Missing required files" print(f"Version : {__version__}") print(f"WorkSpace : {args.workspace}") + print(f"Spec version: {args.spec}") if args.consolidate: print(f"Test report paths : {args.consolidate}") @@ -212,8 +232,12 @@ def main(): # NOTE: We have added internal test directory as mandatory if 'internal_testing' is true in test runner json. # NOTE: If internal_test is true in test runner json then both internal and external tests we can run, else we can continue our existing flow. 
- with open(test_runner_json, "r") as f: - test_runner_config = json.load(f) + + if test_runner_json: + with open(test_runner_json, "r") as f: + test_runner_config = json.load(f) + + test_runner_spec_version = test_runner_config.get("spec_version", None) internal_testing = test_runner_config.get("internal_testing", False) @@ -232,6 +256,24 @@ def main(): if args.list: test_hierarchy.print_test_groups_test_cases(args.group) return 0, None, "List of tests is printed" + + if args.test_help: + _, test_case_instances = test_hierarchy.instantiate_obj_for_testcase(args.testcase) + + for test_instance in test_case_instances: + module_name = test_instance.__class__.__module__ + if module_name in sys.modules: + module = sys.modules[module_name] + else: + module = importlib.import_module(module_name) + + module_doc = module.__doc__ + + print("\n========== Test Description ==========") + print(module_doc) + print("======================================\n") + + return 0, None, "Help for the test case is printed" if args.Discovery: runner = TestRunner( @@ -245,6 +287,8 @@ def main(): redfish_response_messages=redfish_response_messages, default_config_path=default_config_path, net_rc=net_rc, + spec_version=spec_version, + test_runner_spec_version=test_runner_spec_version, ) status_code, exit_string = runner.get_system_details() return status_code, None, exit_string @@ -262,11 +306,12 @@ def main(): default_config_path=default_config_path, consolidate = args.consolidate, net_rc=net_rc, + spec_version=spec_version, + test_runner_spec_version=test_runner_spec_version, ) status_code, exit_string = runner.consolidate_run() return status_code, None, exit_string - elif args.testcase: runner = TestRunner( workspace_dir=args.workspace, @@ -280,7 +325,10 @@ def main(): default_config_path=default_config_path, net_rc=net_rc, single_test_override=args.testcase, + spec_version=spec_version, + test_runner_spec_version=test_runner_spec_version, ) + elif args.testcase_sequence: runner = TestRunner( workspace_dir=args.workspace, @@ -294,7 +342,10 @@ def main(): default_config_path=default_config_path, net_rc=net_rc, sequence_test_override=args.testcase_sequence, + spec_version=spec_version, + test_runner_spec_version=test_runner_spec_version, ) + elif args.group: runner = TestRunner( workspace_dir=args.workspace, @@ -308,7 +359,10 @@ def main(): default_config_path=default_config_path, net_rc=net_rc, single_group_override=args.group, + spec_version=spec_version, + test_runner_spec_version=test_runner_spec_version, ) + elif args.group_sequence: runner = TestRunner( workspace_dir=args.workspace, @@ -322,9 +376,10 @@ def main(): default_config_path=default_config_path, net_rc=net_rc, sequence_group_override=args.group_sequence, + spec_version=spec_version, + test_runner_spec_version=test_runner_spec_version, ) - else: all_tests = test_hierarchy.get_all_tests() runner = TestRunner( @@ -338,7 +393,9 @@ def main(): redfish_uri_config_file=redfish_uri_config, redfish_response_messages=redfish_response_messages, default_config_path=default_config_path, - run_all_tests=all_tests + run_all_tests=all_tests, + spec_version=spec_version, + test_runner_spec_version=test_runner_spec_version, ) status_code, exit_string = runner.run() diff --git a/ctam/interfaces/functional_ifc.py b/ctam/interfaces/functional_ifc.py index 4e61b34..aeeabf5 100644 --- a/ctam/interfaces/functional_ifc.py +++ b/ctam/interfaces/functional_ifc.py @@ -13,6 +13,8 @@ import time import ast import shlex +import shutil +import glob from datetime import datetime 
from typing import Optional, List import ocptv.output as tv @@ -999,3 +1001,24 @@ def ctam_verify_components_health(self): if failed_devices: return False , failed_devices return True, [] + + def flatten_validator_output(self, testId, testName, logger_path): + """ + Moves RedfishInteropValidator_ output files to parent folder + and removes the extra directory to prevent long path issues. + """ + subdirs = glob.glob(os.path.join(logger_path, "RedfishInteropValidator_*")) + dest_path = os.path.join(logger_path, f"{testId}_{testName}") + os.makedirs(dest_path, exist_ok=True) + + for subdir in subdirs: + for item in os.listdir(subdir): + src = os.path.join(subdir, item) + dst = os.path.join(dest_path, item) + try: + shutil.move(src, dst) + except Exception as e: + print(f"Warning: Could not move src to dst: Reason: {e}") + + # Only remove after all files moved + shutil.rmtree(subdir, ignore_errors=True) diff --git a/ctam/interfaces/fw_update_ifc.py b/ctam/interfaces/fw_update_ifc.py index 44ebc04..08f5775 100644 --- a/ctam/interfaces/fw_update_ifc.py +++ b/ctam/interfaces/fw_update_ifc.py @@ -186,7 +186,7 @@ def ctam_stage_fw( if corrupted_component_id != None: failure_reason = "Error in creating corrupted component!" else: - failure_reason = f"Package file not found in the workspace !!!" + failure_reason = "Package file missing in workspace!" return False, failure_reason, "" if self.dut().is_debug_mode(): print(JSONFWFilePayload) @@ -203,7 +203,7 @@ def ctam_stage_fw( uri = self.dut().uri_builder.format_uri(redfish_str="{GPUMC}" + uri, component_type="GPU") if not status: self.test_run().add_log(LogSeverity.DEBUG, f"Unable to find update uri from UpdateService resource!!!") - failure_reason = "Unable to find update uri from UpdateService resource!!!" + failure_reason = "Update URI missing from UpdateService!" return False, failure_reason, "" targets = self.get_target_inventorys(targets=specific_targets) if specific_targets else [] if self.dut().is_debug_mode(): @@ -312,6 +312,7 @@ def ctam_fw_update_verify(self, image_type="default", corrupted_component_id=Non """ MyName = __name__ + "." + self.ctam_fw_update_verify.__qualname__ Update_Verified = True + exception = False update_successful = [] update_failed = [] failure_reason = "" @@ -361,7 +362,7 @@ def ctam_fw_update_verify(self, image_type="default", corrupted_component_id=Non update_failed.append(element['SoftwareId']) Update_Verified = False msg += f"Update Failed : Expected {ExpectedVersion}" - failure_reason += " " + msg + failure_reason = f"Update Failed : Expected {ExpectedVersion}" elif negative_case: # Negative test case, but expected. 
@@ -373,12 +374,13 @@ def ctam_fw_update_verify(self, image_type="default", corrupted_component_id=Non self.test_run().add_log(LogSeverity.DEBUG, msg) except Exception as e: - failure_reason += " Exception occured: " + str(e) + failure_reason = " Exception occured: " + str(e) + exception = True self.test_run().add_log(LogSeverity.ERROR, "Exception occured: " + str(e)) Update_Verified = False if not version_check: - if len(update_successful) > 0: + if not exception and len(update_successful) > 0: Update_Verified = True else: Update_Verified = False diff --git a/ctam/test_hierarchy.py b/ctam/test_hierarchy.py index e30c63f..286579c 100644 --- a/ctam/test_hierarchy.py +++ b/ctam/test_hierarchy.py @@ -265,25 +265,26 @@ def print_test_groups_test_cases(self, group_name=None): :param group_name: Name of group, defaults to None :type group_name: str, optional """ - t = PrettyTable(["GroupID", "GroupName", "GroupTag", "TestCaseID", "TestCaseName", "TestCaseTag", "TestCaseWeightScore"]) + t = PrettyTable(["GroupID", "GroupName", "GroupTag", "TestCaseID", "TestCaseName", "TestCaseTag", "TestCaseWeightScore", "Spec Versions"]) t.title = "Test Case info table" def print_group_info(group_name, group_info): total_cases = len(group_info["test_cases"]) if total_cases == 0: - t.add_row([group_info["group_attributes"]["group_id"], group_name, "", "", "", "", ""]) + t.add_row([group_info["group_attributes"]["group_id"], group_name, "", "", "", "", "", ""]) return count = 0 # print(f'\nGroup ID: {group_info["group_attributes"]["group_id"]} Test Group Name: {group_name}') for testcase in group_info["test_cases"]: + spec_support = testcase["attributes"].get("spec_versions", "") if count != total_cases//2: t.add_row(["", "", group_info["group_attributes"]["tags"],\ testcase["attributes"]["test_id"], testcase["testcase_name"], testcase["attributes"]["tags"],\ - testcase["attributes"]["score_weight"]]) + testcase["attributes"]["score_weight"], spec_support]) else: t.add_row([group_info["group_attributes"]["group_id"], group_name, group_info["group_attributes"]["tags"],\ testcase["attributes"]["test_id"], testcase["testcase_name"], testcase["attributes"]["tags"],\ - testcase["attributes"]["score_weight"]]) + testcase["attributes"]["score_weight"], spec_support]) count += 1 # print( # f' Test Case ID: {testcase["attributes"]["test_id"]} Test Case Name: {testcase["testcase_name"]}' @@ -293,11 +294,11 @@ def print_group_info(group_name, group_info): # If no group name is specified, print all groups. for group_name, group_info in self.test_groups.items(): print_group_info(group_name, group_info) - t.add_row(["","","","","","",""], divider=True) + t.add_row(["","","","","","","",""], divider=True) else: # Otherwise, print the specified group. 
- #print(self.test_groups) + # print(self.test_groups) group_info = self._find_group(group_name) # group_info = self.test_groups.get(group_name) if group_info is not None: @@ -395,7 +396,6 @@ def _instantiate_object(self, obj_info, class_name, init_param=None): if not module_name or not module_path: print("Module name or module path is missing.") return None, None - try: spec = importlib.util.spec_from_file_location( module_name, os.path.join(module_path, module_name + ".py") diff --git a/ctam/test_runner.py b/ctam/test_runner.py index 37d566b..f040b4c 100644 --- a/ctam/test_runner.py +++ b/ctam/test_runner.py @@ -22,6 +22,7 @@ from tests.test_group import TestGroup from interfaces.functional_ifc import FunctionalIfc from test_hierarchy import TestHierarchy +from packaging.version import Version, InvalidVersion from prettytable import PrettyTable import threading, time @@ -65,6 +66,8 @@ def __init__( single_group_override=None, sequence_group_override=None, run_all_tests=None, + spec_version=None, + test_runner_spec_version=None, ): """ Init function that handles test execution variations @@ -125,8 +128,19 @@ def __init__( self.package_config = package_info_json_file self.redfish_response_messages = {} self.default_config_path = default_config_path + self.show_spec_bindings = False + + if self.default_config_path: + self._show_spec_bindings() + self.show_spec_bindings = True + + self.test_runner_spec_version = test_runner_spec_version + self.single_test_override = single_test_override - runner_config = self._get_test_runner_config(test_runner_json_file) + runner_config = self._get_test_runner_config(test_runner_json_file) + runner_config_file = test_runner_json_file + self.spec_version = spec_version + self.latest_spec_version = self.get_latest_spec_version() with open(dut_info_json_file) as dut_info_json: self.dut_config = json.load(dut_info_json) @@ -139,9 +153,10 @@ def __init__( self.sanitize_logs = self.dut_config.get("properties", {}).get("SanitizeLog", {}).get("value", False) self.words_to_skip = self.get_words_to_skip() - if redfish_response_messages: + if redfish_response_messages: with open(redfish_response_messages) as resp_file: self.redfish_response_messages = json.load(resp_file) + # use override output directory if specified in test_runner.json, otherwise # use TestRuns directory below workspace directory @@ -157,6 +172,7 @@ def __init__( self.group_sequence = sequence_group_override elif runner_config.get("test_sequence", None): self.test_sequence = runner_config.get("test_sequence", None) + elif runner_config.get("group_sequence", None): self.group_sequence = runner_config.get("group_sequence", None) elif runner_config.get("active_test_suite", None): @@ -371,10 +387,9 @@ def run(self): if self.progress_bar: progress_thread = threading.Thread(target=self.display_progress_bar) progress_thread.daemon = True - group_status_set = set() group_result_set = set() - if self.test_cases: + if self.test_cases: if self.progress_bar and self.console_log is False: self.total_cases = len(self.test_cases) progress_thread.start() @@ -410,7 +425,7 @@ def run(self): """ previous_test_result = True - for index, test in enumerate(self.test_sequence): + for index, test in enumerate(self.test_sequence): if test == "PROF": prev_test = self.test_sequence[index - 1] @@ -428,10 +443,9 @@ def run(self): group_inc_tags = group_instance.tags # group_exc_tags = group_instance.exclude_tags - group_status, group_result = self._run_group_test_cases(group_instance, test_case_instances) previous_test_result = 
group_result.value - + group_status_set.add(group_status) group_result_set.add(group_result) @@ -688,8 +702,7 @@ def load_fixed_json(self, file_path): print(f"Error decoding JSON in {file_path}: {e}") return [] - - + def _run_group_test_cases(self, group_instance, test_case_instances): """ for now, create a separate test run for each group. In the event of failures @@ -708,19 +721,26 @@ def _run_group_test_cases(self, group_instance, test_case_instances): try: if not self.comp_tool_dut: + if not self.spec_version: + if self.test_runner_spec_version: + spec_version = Version(self.test_runner_spec_version) + else: + spec_version, _ = self.get_latest_spec_version() + self.initialize_spec_path(spec_version) self._start(group_instance.__class__.__name__) self.active_run.start(dut=tv.Dut(id=group_instance.__class__.__name__)) group_instance.setup() for test_instance in test_case_instances: + status, error_msg = self.resolve_and_load_spec_version(test_instance) test_inc_tags = test_instance.tags tags = list(set(test_inc_tags) | set(group_instance.tags)) valid = self._is_enabled( self.include_tags_set, tags, self.exclude_tags_set, - ) + ) if not valid and not self.single_test_override: msg = f"Test {test_instance.__class__.__name__} skipped due to tags. tags = {test_inc_tags}" skipped_test = self.active_run.add_step(name=f"<{test_instance.test_id} - {test_instance.test_name}>") @@ -748,14 +768,19 @@ def _run_group_test_cases(self, group_instance, test_case_instances): self.comp_tool_dut.logger = logger execution_starttime = time.perf_counter() failure_reason = "" - test_result, failure_reason = test_instance.run() - if ( - test_result == TestResult.FAIL - ): # if any test fails, the group fails - group_result = TestResult.FAIL + if not status: + msg = f"{test_instance.__class__.__name__} skipped due to :{error_msg}" + self.active_run.add_log(severity=LogSeverity.ERROR, message=msg) + failure_reason = "spec version not supported" + else: + test_result, failure_reason = test_instance.run() + if ( + test_result == TestResult.FAIL + ): # if any test fails, the group fails + group_result = TestResult.FAIL except: exception_details = traceback.format_exc() - failure_reason += " " + exception_details + failure_reason = exception_details self.active_run.add_log( severity=LogSeverity.FATAL, message=exception_details ) @@ -789,7 +814,7 @@ def _run_group_test_cases(self, group_instance, test_case_instances): self.test_result_data.append(test_tuple) # Removing duplicates and storing the latest result - self.filter_and_update_test_results(test_instance, execution_time) + self.filter_and_update_test_results(test_instance, execution_time, failure_reason) self.score_logger.write(json.dumps(msg)) grade = ( @@ -839,7 +864,7 @@ def _run_group_test_cases(self, group_instance, test_case_instances): return group_status, group_result - def filter_and_update_test_results(self, test_instance, execution_time): + def filter_and_update_test_results(self, test_instance, execution_time, failure_reason=None): """ Filter and update the test result cache with the current test instance based on result and execution time. 
""" @@ -854,7 +879,8 @@ def filter_and_update_test_results(self, test_instance, execution_time): "ExecutionTime": execution_time, "TestCaseScoreWeight": test_instance.score_weight, "TestCaseScore": test_instance.score, - "TestCaseResult": test_result + "TestCaseResult": test_result, + "FailureReason": failure_reason } # Append all test data for detailed logging self.test_all_cache.append(result_entry) @@ -1176,7 +1202,6 @@ def generate_full_log_from_summary(self): with open(self.test_summary_path, "r") as f: test_report = json.load(f) - # --- Domain Table --- domain_headers = [ "Domain ID", "Domain", "TestCases Available", "TestCases Executed", @@ -1298,7 +1323,7 @@ def generate_full_log_from_summary(self): # --- Test Results Table --- test_headers = [ - "Test ID", "Test Name", "Execution Time", "TestCase Weight", "Test Score", "Test Result" + "Test ID", "Test Name", "Execution Time", "TestCase Weight", "Test Score", "Test Result", "Failure Reason" ] test_title = f"Test Result - V {__version__}" @@ -1317,10 +1342,17 @@ def generate_full_log_from_summary(self): exec_t = 0.0 else: exec_t = test["ExecutionTime"] - + failure_reason = test.get("FailureReason", "") + # Clean up escaped newlines/tabs + failure_reason = failure_reason.replace("\\n", " ").replace("\\t", " ") + words = failure_reason.split() + failure_reason = " ".join(words[:8]) # keep only first 8 words + if len(words) > 8: + failure_reason += " ..." # optional: indicate truncation test_rows.append([ test["TestID"], test["TestName"], self.seconds_to_time(exec_t), - test["TestCaseScoreWeight"], test["TestCaseScore"], test["TestCaseResult"] + test["TestCaseScoreWeight"], test["TestCaseScore"], test["TestCaseResult"], + failure_reason ]) # Accumulate total weight, and score @@ -1338,7 +1370,7 @@ def generate_full_log_from_summary(self): c_weight = t_total_weight if self.consolidate and self.inside else TestCase.max_compliance_score score_t = t_total_score if self.consolidate and self.inside else TestCase.total_compliance_score - test_total_row = ["Total", "", self.seconds_to_time(self.t_execution_time), c_weight, score_t, f"{t_grade_total}%"] + test_total_row = ["Total", "", self.seconds_to_time(self.t_execution_time), c_weight, score_t, f"{t_grade_total}%", ""] self.print_pretty_table(test_title, test_headers, test_rows, total_row=test_total_row) # Print the test results table @@ -1372,6 +1404,9 @@ def print_pretty_table(self, title, headers, rows, total_row=None): table.add_row(["" for _ in headers], divider=True) table.add_row(total_row) + if "Failure Reason" in headers: + table.align["Failure Reason"] = "l" + print(table) # Write the table to the test result file @@ -1508,3 +1543,153 @@ def post_proces_logs(self, log_path: str = "") -> None: severity=LogSeverity.FATAL, message=exception_details ) # status_code, exit_string = 1, f"Test failed due to execption: {repr(e)}" + + def get_latest_spec_version(self): + base_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "json_spec", "input") + + if not os.path.exists(base_path): + raise FileNotFoundError(f"Base path not found: {base_path}") + + spec_folders = [ + d for d in os.listdir(base_path) + if d.startswith("spec_") and os.path.isdir(os.path.join(base_path, d)) + ] + + if not spec_folders: + raise FileNotFoundError("No spec_* folders found under json_spec/input") + + versions = [] + for folder in spec_folders: + version_str = folder.replace("spec_", "") + try: + versions.append(Version(version_str)) + except InvalidVersion: + print(f"Skipping invalid spec folder: 
{folder}") + + if not versions: + raise FileNotFoundError("No valid spec_* folders found under json_spec/input") + + versions_sorted = sorted(versions) + latest_version = versions_sorted[-1] + all_versions = [str(v) for v in versions_sorted] + + return latest_version, all_versions + + def is_supported_version(self, test_instance, version): + spec_support = getattr(test_instance, "spec_versions", None) + if isinstance(spec_support, list): + support_versions = [Version(v) for v in spec_support] + if version in support_versions: + return True + elif isinstance(spec_support, str): + sign, support = spec_support.split() + spec_support = Version(support) + expr = f"({version} {sign} {spec_support})" + return eval(expr) + + def resolve_and_load_spec_version(self, test_instance): + """ + Determines and loads the appropriate specification version for the test case + based on inputs from the command line, the test_runner.json file, and the + versions supported by the test case itself. + + Decision logic: + 1. If no spec version is provided via command line or test_runner.json: + → Use and load the latest available spec version. + 2. If a spec version is provided via the command line: + → Give it first priority. + → Validate it against all known versions and the test case’s supported versions. + → Run if valid and supported; otherwise, fail with an appropriate message. + 3. If no command-line spec is passed but a version exists in test_runner.json: + → Validate it against the available and supported versions. + → Load and run if valid; otherwise, fail. + + Args: + test_instance (object): The test case instance whose spec compatibility + and version need to be resolved. + + Returns: + tuple: + (bool, str or None) + - bool: Indicates whether the spec version was successfully resolved and loaded. + - str or None: Error message if resolution fails, None otherwise. 
+ """ + spec_support = getattr(test_instance, "spec_versions", None) + spec_version = self.spec_version + spec_version = Version(spec_version) if spec_version else None + result = False + if spec_version: + result = self.is_supported_version(test_instance, spec_version) + + test_runner_spec_version = Version(self.test_runner_spec_version) if self.test_runner_spec_version else None + latest_version, all_versions_str = self.get_latest_spec_version() + all_versions = [Version(v) for v in all_versions_str] + + # Case 1: If spec is missing from both test runner json and cmd line, then go with the latest version + if (not test_runner_spec_version and not spec_version): + self.initialize_spec_path(latest_version) + print(f"\033[31mPicking the latest version since not specified in test_runner.json and not passed through the command line: {latest_version}\033[0m") + return True, None + + # Case 2: Giving first preference to spec version passed through the command line + if spec_version: + # Validate version + is_valid_version = spec_version in all_versions and spec_version <= latest_version + if not is_valid_version: + return False, "The spec version passed through the command line is not a valid version" + + if spec_support: + if result: + print("################### running with spec_version :", spec_version) + return True, None + else: + return False, "The spec version passed through the cmd is not supported by the test case" + else: + print("################### running with spec_version :", spec_version) + return True, None + + # take the test_runner_spec_version if the spec version is not passed through cmd line + is_valid_version = test_runner_spec_version in all_versions and test_runner_spec_version <= latest_version + if not is_valid_version: + return False, "The spec version in test_runner.json is not a valid version" + + if self.is_supported_version(test_instance, test_runner_spec_version): + self.initialize_spec_path(test_runner_spec_version) + print("########## Running the test with the spec version specified in the test runner :", test_runner_spec_version) + return True, None + else: + return False, "The version provided in the test_runner.json is not supported by the test case" + + + def initialize_spec_path(self, spec_version=None): + """ + Initializes and sets the default path for the given specification version. + + Args: + spec_version (str, optional): The specification version to initialize. + + Notes: + - Ensures the spec bindings are displayed only once. + - Only prepares the path reference; it does not create or modify any files. 
+ """ + self.default_config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "json_spec", "input", f"spec_{spec_version}") + self.default_config_path = self.default_config_path.replace('/tmp/', '') if self.default_config_path.startswith('/tmp/') else self.default_config_path + if not self.show_spec_bindings: + self._show_spec_bindings() + self.show_spec_bindings = True + + def _show_spec_bindings(self): + json_file_path = os.path.join( + self.default_config_path, + "spec_bindings.json" + ) + + if os.path.exists(json_file_path): + with open(json_file_path, "r") as f: + content = json.load(f) + print("\n========== OCP SPEC BINDINGS ==========") + print(json.dumps(content, indent=4)) + print("=================================\n") + else: + print(f"File not found: {json_file_path}") + diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_activation_time.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_activation_time.py index e2370ab..1d655fa 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_activation_time.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_activation_time.py @@ -7,6 +7,7 @@ :Test ID: F64 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case verifies that the firmware activation operation does not exceed the maximum time specified in the requirements. The test stages the firmware update, activates it, and verifies the update within the @@ -39,7 +40,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -66,6 +67,7 @@ class CTAMTestFullDeviceUpdateActivationTime(TestCase): score_weight: int = 10 tags: List[str] = ["L1"] compliance_level: str = "L1" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -95,7 +97,7 @@ def run(self) -> TestResult: step1 = self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore with step1.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck() - failure_reason += status_msg + failure_reason = status_msg if not status: step1.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Capable") else: @@ -106,21 +108,21 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore with step3.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac(check_time=True) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -130,14 +132,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step4 = 
self.test_run().add_step(f"{self.__class__.__name__} run(), step4") with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -147,7 +149,7 @@ def run(self) -> TestResult: step4.add_log( LogSeverity.INFO, f"{self.test_id} : Update Verification Failed" ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_activation_with_failed_component.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_activation_with_failed_component.py index 3a3bc19..d69a527 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_activation_with_failed_component.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_activation_with_failed_component.py @@ -7,6 +7,7 @@ :Test ID: F56 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test verifies the robustness of the firmware update stack when one component fails to stage. The vendor must provide a firmware package where one component image is corrupted, while all other @@ -32,7 +33,7 @@ Optional - , , , : Required - , , , , , , , - Optional - , + Optional - , ShotPowerCycle>, : Required - , , , Optional - , @@ -41,7 +42,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -68,6 +69,7 @@ class CTAMTestFullDeviceUpdateActivationWithFailedComponent(TestCase): score_weight: int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -104,7 +106,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : Corrupt Component Id Retrieval Failed" ) result = False - failure_reason += f"{self.test_id} : Corrupt Component Id Retrieval Failed" + failure_reason = "Corrupt Component Id Retrieval Failed" else: step0.add_log(LogSeverity.INFO, f"{self.test_id} : Corrupt Component Id Retrieved") @@ -112,7 +114,7 @@ def run(self) -> TestResult: step1 = self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore with step1.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck() - failure_reason += status_msg + failure_reason = status_msg if not status: step1.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Capable") else: @@ -125,21 +127,21 @@ def run(self) -> TestResult: status, msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(image_type="corrupt_component", corrupted_component_id=self.corrupted_component_id ) - failure_reason += " " + msg + failure_reason = msg if status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore with step3.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log( LogSeverity.INFO, 
f"{self.test_id} : FW Update Activate" @@ -149,7 +151,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: @@ -158,7 +160,7 @@ def run(self) -> TestResult: status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify(image_type="corrupt_component", corrupted_component_id=self.corrupted_component_id ) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -168,7 +170,7 @@ def run(self) -> TestResult: step4.add_log( LogSeverity.INFO, f"{self.test_id} : Update Verification Failed" ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_in_loop.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_in_loop.py index 8c12897..3643355 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_in_loop.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_in_loop.py @@ -8,6 +8,7 @@ :Test ID: F8 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case performs a full firmware update in a loop to ensure stability and verify that ongoing rollbacks are not affected by subsequent updates. The test stages the firmware update, activates it, and verifies the update in a loop. @@ -34,7 +35,7 @@ : Required - Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -60,6 +61,7 @@ class CTAMTestFullDeviceUpdateInLoop(TestCase): score_weight: int = 10 tags: List[str] = ["L1"] compliance_level: str = "L1" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -92,7 +94,7 @@ def run(self) -> TestResult: status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck( image_type="backup" ) - failure_reason += status_msg + failure_reason = status_msg if not status: step1.add_log(LogSeverity.INFO, f"[{self.test_id}] : FW Update Capable") else: @@ -106,14 +108,14 @@ def run(self) -> TestResult: status, status_msg, task_id =self.group.fw_update_ifc.ctam_stage_fw( wait_for_stage_completion=False ) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False unexpected_error = False @@ -124,7 +126,7 @@ def run(self) -> TestResult: disturb_count = 1 while keep_disturbing: status, msg, _ = self.group.fw_update_ifc.ctam_stage_fw(image_type="backup") - failure_reason += " " + msg + failure_reason = msg if status: keep_disturbing = False step3.add_log( @@ -142,7 +144,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Disturb Failed, but with incorrect message.", ) - failure_reason += " " + "FW Update Disturb Failed, but with incorrect message." + failure_reason = "FW Update Disturb incorrect message." 
keep_disturbing = False unexpected_error = True @@ -152,13 +154,13 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Disturb was successful in first shot - Not Expected", ) - failure_reason += " " + f"{self.test_id} : FW Update Disturb was successful in first shot - Not Expected" + failure_reason = "FW Update Disturb unexpectedly successful" if unexpected_error and task_id: step3.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Failed with Unexpected error - Waiting for pervious FW staging to be completed", ) - failure_reason += " " + f"{self.test_id} : FW Update Failed with Unexpected error - Waiting for pervious FW staging to be completed" + failure_reason = "Waiting for previous FW staging" status, json_data = self.group.fw_update_ifc.ctam_monitor_task(TaskID=task_id) @@ -166,7 +168,7 @@ def run(self) -> TestResult: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4") # type: ignore with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -176,14 +178,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result or unexpected_error: step5 = self.test_run().add_step(f"{self.__class__.__name__} run(), step5") with step5.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify(image_type="backup") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step5.add_log( LogSeverity.INFO, @@ -194,7 +196,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : Update Verification Failed", ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_nocheck.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_nocheck.py index 27f5e9e..b13cf61 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_nocheck.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_nocheck.py @@ -7,6 +7,7 @@ :Test ID: F5 :Group Name: fw_update :Score Weight: 50 +:Spec Versions: ">= 1.0" :Description: This test case verifies that the firmware staging is successful followed by activate. The test can fail with stage failure. No precheck and postcheck is performed in this test case. 
@@ -33,7 +34,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -60,6 +61,7 @@ class CTAMTestFullDeviceUpdateNoCheck(TestCase): score_weight: int = 50 tags: List[str] = ["L1"] compliance_level: str = "L1" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -90,21 +92,21 @@ def run(self) -> TestResult: step1= self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore with step1.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step1.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step1.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Staging failed" ) - failure_reason += " " + "FW Update Staging failed" + failure_reason = "FW Update Staging failed" result = False if result: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -114,7 +116,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_ping_pong.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_ping_pong.py index 11fa2ff..fe222ad 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_ping_pong.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_ping_pong.py @@ -7,6 +7,7 @@ :Test ID: F88 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case entails performing firmware updates in a loop between N-1 and N i.e backup and default respectively. The test ensures that the firmware updates can be performed repeatedly without issues. 
@@ -33,7 +34,7 @@ : Required - Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -60,6 +61,7 @@ class CTAMTestFullDeviceUpdatePingPong(TestCase): score_weight: int = 10 tags: List[str] = ["Compliance", "L1"] compliance_level: str = "L1" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -96,7 +98,7 @@ def run(self) -> TestResult: status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck( image_type=image_t ) - failure_reason += status_msg + failure_reason = status_msg if not status: step1.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Capable" @@ -110,7 +112,7 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(image_type=image_t) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Staged" @@ -120,14 +122,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed", ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore with step3.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -137,7 +139,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: @@ -148,7 +150,7 @@ def run(self) -> TestResult: status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify( image_type=image_t ) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -159,7 +161,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : Update Verification Failed", ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_precheck_only.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_precheck_only.py index f5a7725..140b75c 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_precheck_only.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_precheck_only.py @@ -7,6 +7,7 @@ :Test ID: F3 :Group Name: fw_update :Score Weight: 0 +:Spec Versions: ">= 1.0" :Description: This test case verifies the precheck flow and expects SoftwareInventory to match against the new firmware @@ -31,7 +32,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -58,6 +59,7 @@ class CTAMTestFullDeviceUpdatePrecheckOnly(TestCase): score_weight: int = 0 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: 
FWUpdateTestGroupN): """ @@ -95,7 +97,7 @@ def run(self) -> TestResult: step1.add_log( LogSeverity.ERROR, f"{self.test_id} : Software Inventory does not match against the new firmware" ) - failure_reason += " " + "Precheck verification failed" + failure_reason = "Precheck verification failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_rollback.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_rollback.py index 0bdac82..30f0f79 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_rollback.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_rollback.py @@ -7,6 +7,7 @@ :Test ID: F0 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case verifies the successful execution of a full firmware update process, focusing on the backup image (N-1). It ensures that the firmware update is staged, activated, and verified without errors. @@ -37,7 +38,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -64,6 +65,7 @@ class CTAMTestFullDeviceUpdateRollback(TestCase): score_weight: int = 10 tags: List[str] = ["Compliance", "L0"] compliance_level: str = "L0" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -106,21 +108,21 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(image_type="backup") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore with step3.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -130,14 +132,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4") with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify(image_type="backup") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -147,7 +149,7 @@ def run(self) -> TestResult: step4.add_log( LogSeverity.INFO, f"{self.test_id} : Update Verification Failed" ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_only.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_only.py index 43c81c4..63cc80f 100644 --- 
a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_only.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_only.py @@ -7,6 +7,7 @@ :Test ID: F2 :Group Name: fw_update :Score Weight: 20 +:Spec Versions: ">= 1.0" :Description: This test case verifies that the firmware staging is successful. The test can fail with stage failure. No precheck and postcheck is performed in this test case. @@ -31,7 +32,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -58,6 +59,7 @@ class CTAMTestFullDeviceUpdateStagingOnly(TestCase): score_weight: int = 20 tags: List[str] = ["L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -88,14 +90,14 @@ def run(self) -> TestResult: step1 = self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore with step1.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step1.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step1.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Staging failed" ) - failure_reason += " " + "FW Update Staging failed" + failure_reason = "FW Update Staging failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_time.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_time.py index 6b6d818..1b5521e 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_time.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_time.py @@ -7,6 +7,7 @@ :Test ID: F63 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" Description: This test verifies that the firmware copy operation (staging) does not exceed the maximum time specified in the requirements(FwStagingTimeMax). 
@@ -38,7 +39,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -65,6 +66,7 @@ class CTAMTestFullDeviceUpdateStagingTime(TestCase): score_weight: int = 10 tags: List[str] = ["L1"] compliance_level: str = "L1" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -105,14 +107,14 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(check_time=True) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_with_failed_component.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_with_failed_component.py index 340fc9f..9d5934d 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_with_failed_component.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_staging_with_failed_component.py @@ -7,7 +7,7 @@ :Test ID: F55 :Group Name: fw_update :Score Weight: 10 - +:Spec Versions: ">= 1.0" :Description: Firmware Update Stack Robustness Test. If one component copying (staging) fails, verify other component copying (staging) goes through. Vendor needs to provide a fwpkg where a component image is corrupted (If it not provided in package info testcase can generate), and all other component images are good (i.e. it will not fail the update). 
@@ -38,7 +38,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -65,6 +65,7 @@ class CTAMTestFullDeviceUpdateStagingWithFailedComponent(TestCase): score_weight: int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -100,7 +101,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : Corrupt Component Id Retrieval Failed" ) result = False - failure_reason += f"{self.test_id} : Corrupt Component Id Retrieval Failed" + failure_reason = "Corrupt Component Id Retrieval Failed" else: step0.add_log(LogSeverity.INFO, f"{self.test_id} : Corrupt Component Id Retrieved") @@ -108,7 +109,7 @@ def run(self) -> TestResult: step1 = self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore with step1.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck() - failure_reason += status_msg + failure_reason = status_msg if not status: step1.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Capable") else: @@ -121,14 +122,14 @@ def run(self) -> TestResult: status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(image_type="corrupt_component", corrupted_component_id=self.corrupted_component_id ) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_with_older_version.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_with_older_version.py index 8017721..eb63600 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_with_older_version.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_full_device_update_with_older_version.py @@ -7,6 +7,7 @@ :Test ID: F19 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case focuses on updating the firmware of the full device using an older or previous version @@ -39,7 +40,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -66,6 +67,7 @@ class CTAMTestFullDeviceUpdateWithOlderVersion(TestCase): score_weight: int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -114,7 +116,7 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(image_type=image_t) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Staged" @@ -123,14 +125,14 @@ def run(self) -> TestResult: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: 
step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore with step3.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -140,14 +142,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4") with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify(image_type=image_t) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -158,7 +160,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : Update Verification Failed", ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_corrupt_image_update.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_corrupt_image_update.py index 28e334a..7b222d7 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_corrupt_image_update.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_corrupt_image_update.py @@ -7,6 +7,7 @@ :Test ID: F23 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case is a negative test. It searches for GPU_FW_IMAGE_CORRUPT referenced by package_info.json and attempts @@ -39,7 +40,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -66,6 +67,7 @@ class CTAMTestNegativeCorruptImageUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -106,18 +108,19 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(partial=1, image_type="corrupt") - failure_reason += status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Staged Initiation Failed as Expected", ) else: + failure_reason += " " + "FW Update Staging Initiated - Unexpected" step2.add_log( LogSeverity.ERROR, - f"{self.test_id} : FW Update Staging Initiated - Unexpected", + f"{self.test_id} : {failure_reason}", ) - failure_reason += " " + "FW Update Staging Initiated - Unexpected" + failure_reason = "FW Update Staging Initiated - Unexpected" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_empty_metadata_image_update.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_empty_metadata_image_update.py index e9c5dcb..c76fac1 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_empty_metadata_image_update.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_empty_metadata_image_update.py @@ -7,6 +7,7 
@@ :Test ID: F26 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case is a negative test. It makes a copy of the default FW image provided in package_info.json @@ -39,7 +40,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -66,6 +67,7 @@ class CTAMTestNegativeEmptyMetadataImageUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L2", "Single_Device"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -115,7 +117,7 @@ def run(self) -> TestResult: step3.add_log(LogSeverity.INFO, f"{self.test_id} : Single Device Selected") else: step3.add_log(LogSeverity.ERROR, f"{self.test_id} : Single Device Selection Failed") - failure_reason += f"{self.test_id} : Single Device Selection Failed" + failure_reason = "Single Device Selection Failed" result = False step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4_{corrupted_component_list[0]}") # type: ignore @@ -123,7 +125,7 @@ def run(self) -> TestResult: status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(partial=1, image_type="empty_metadata", corrupted_component_id=corrupted_component_id, specific_targets=[corrupted_component_list[0]]) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -134,7 +136,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Staging Initiated - Unexpected", ) - failure_reason += " " + "FW Update Staging Initiated - Unexpected" + failure_reason = "FW Update Staging Initiated - Unexpected" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_full_device_update_staging_interruption_with_ac_reset.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_full_device_update_staging_interruption_with_ac_reset.py index c572a31..32bfb89 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_full_device_update_staging_interruption_with_ac_reset.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_full_device_update_staging_interruption_with_ac_reset.py @@ -7,6 +7,7 @@ :Test ID: F24 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test verifies the behavior of a full device firmware update staging process when interrupted by an AC power reset. 
@@ -40,7 +41,7 @@ : Required - Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -67,6 +68,7 @@ class CTAMTestFullDeviceUpdateStagingInterruptionWithAcReset(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L2", "Single_Device"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -109,21 +111,21 @@ def run(self) -> TestResult: with step2.scope(): fwupd_status, status_msg, fwupd_task_id = self.group.fw_update_ifc.ctam_stage_fw(image_type="backup", wait_for_stage_completion=False) - failure_reason += " " + status_msg + failure_reason = status_msg if fwupd_status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore with step3.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += status_msg + failure_reason = status_msg if status: step3.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -133,14 +135,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4") with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify(version_check=False) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -151,13 +153,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : Update Verification Failed", ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() self.result = TestResult.PASS if result else TestResult.FAIL if self.result == TestResult.PASS: self.score = self.score_weight + failure_reason = "" # call super last to log result and score super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_device_uuid_image_update.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_device_uuid_image_update.py index c598ce0..9c7b3a7 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_device_uuid_image_update.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_device_uuid_image_update.py @@ -7,6 +7,7 @@ :Test ID: F28 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case is a negative test. 
It makes a copy of the default FW image provided in package_info.json @@ -39,7 +40,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -66,6 +67,7 @@ class CTAMTestNegativeInvalidDeviceUUIDImageUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -107,7 +109,7 @@ def run(self) -> TestResult: with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(partial=1, image_type="invalid_device_uuid") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, @@ -118,7 +120,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Staging Initiated - Unexpected", ) - failure_reason += " " + "FW Update Staging Initiated - Unexpected" + failure_reason = "FW Update Staging Initiated - Unexpected" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_package_uuid_image_update.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_package_uuid_image_update.py index a716488..2abfd3d 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_package_uuid_image_update.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_package_uuid_image_update.py @@ -7,6 +7,7 @@ :Test ID: F27 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case is a negative test. It makes a copy of the default FW image provided in package_info.json @@ -38,7 +39,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -65,6 +66,7 @@ class CTAMTestNegativeInvalidPackageUUIDImageUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -106,7 +108,7 @@ def run(self) -> TestResult: with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(partial=1, image_type="invalid_pkg_uuid") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, @@ -117,7 +119,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Staging Initiated - Unexpected", ) - failure_reason += " " + "FW Update Staging Initiated - Unexpected" + failure_reason = "FW Update Staging Initiated - Unexpected" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_signed_image_update.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_signed_image_update.py index c9a8177..c581eea 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_signed_image_update.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_invalid_signed_image_update.py @@ -7,7 +7,7 @@ :Test ID: F22 :Group Name: fw_update :Score Weight: 10 - +:Spec Versions: ">= 1.0" :Description: This test case focuses on the scenario where we are trying to initiate a firmware update 
flow with an invalid signed image. The expectation is for staging to fail when this is attempted. The image is provided by the vendor and it has a section in package_info.json. @@ -38,7 +38,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -65,6 +65,7 @@ class CTAMTestNegativeInvalidSignedImageUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -106,7 +107,7 @@ def run(self) -> TestResult: with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(partial=1, image_type="invalid_sign") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, @@ -117,7 +118,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Staging Initiated - Unexpected", ) - failure_reason += " " + "FW Update Staging Initiated - Unexpected" + failure_reason = "FW Update Staging Initiated - Unexpected" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_large_image_update.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_large_image_update.py index 550d372..f35bf06 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_large_image_update.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_large_image_update.py @@ -7,6 +7,7 @@ :Test ID: F25 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case focuses on the scenario where a large image transfer is initiated for a firmware update. The expectation is for the staging process to fail when this is attempted. @@ -37,7 +38,7 @@ : Required - Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -63,6 +64,7 @@ class CTAMTestNegativeLargeImageUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_single_device_update_with_illegal_targets.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_single_device_update_with_illegal_targets.py index 94a2b6e..7452ed5 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_single_device_update_with_illegal_targets.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_single_device_update_with_illegal_targets.py @@ -7,6 +7,7 @@ :Test ID: F32 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case focuses on the scenario where a GPU baseboard has multiple targets, some of which are updatable and others which are not. 
The goal of this test case is to identify the list of firmware inventory targets @@ -38,7 +39,7 @@ : Required - Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -64,6 +65,7 @@ class CTAMTestNegativeSingleDeviceUpdateWithIllegalTargets(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L3", "Single_Device"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -104,14 +106,14 @@ def run(self) -> TestResult: step1.add_log(LogSeverity.INFO, f"{self.test_id} : Single Device Selected") else: step1.add_log(LogSeverity.ERROR, f"{self.test_id} : Single Device Selection Failed") - failure_reason += f"{self.test_id} : Single Device Selection Failed" + failure_reason += "Single Device Selection Failed" result = False if result: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck() - failure_reason += status_msg + failure_reason = status_msg if not status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Capable") else: @@ -124,21 +126,21 @@ def run(self) -> TestResult: with step3.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw( partial=1, specific_targets=component_list) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step3.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4") # type: ignore with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -148,14 +150,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step5 = self.test_run().add_step(f"{self.__class__.__name__} run(), step5") with step5.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify(image_type="negate") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step5.add_log( LogSeverity.INFO, @@ -165,7 +167,7 @@ def run(self) -> TestResult: step5.add_log( LogSeverity.INFO, f"{self.test_id} : Update Verification Failed" ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_single_fw_update_staging_interruption_with_ac_reset.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_single_fw_update_staging_interruption_with_ac_reset.py index 4449acf..54caa4b 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_single_fw_update_staging_interruption_with_ac_reset.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_single_fw_update_staging_interruption_with_ac_reset.py @@ -7,6 +7,7 @@ :Test ID: F62 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" 
:Description: This test case focuses on the scenario where an image transfer is initiated for a firmware update. The objective is to ensure that after the image transfer is initiated, the system proceeds without waiting @@ -39,7 +40,7 @@ Optional - """ import ast -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -65,6 +66,7 @@ class CTAMTestSingleFWUpdateStagingInterruptionWithACReset(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L3", "Single_Device"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -101,14 +103,14 @@ def run(self) -> TestResult: step1.add_log(LogSeverity.INFO, f"{self.test_id} : Single Device Selected") else: step1.add_log(LogSeverity.ERROR, f"{self.test_id} : Single Device Selection Failed") - failure_reason += f"{self.test_id} : Single Device Selection Failed" + failure_reason += "Single Device Selection Failed" result = False if result: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck(image_type="backup") - failure_reason += status_msg + failure_reason = status_msg if not status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Capable") else: @@ -121,36 +123,36 @@ def run(self) -> TestResult: partial=1, wait_for_stage_completion=False, image_type="backup", specific_targets=ast.literal_eval(self.dut().uri_builder.format_uri(redfish_str="{specific_targets}", component_type="GPU_FWUpdate")) ) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staging Initiated") else: step3.add_log(LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Initiation Failed") - failure_reason += " " + "FW Update Stage Initiation Failed" + failure_reason = "FW Update Stage Initiation Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4") # type: ignore with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac(fwupd_hyst_wait=False) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Activate, interrupting staging flow with reset") else: step4.add_log(LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed") - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step5 = self.test_run().add_step(f"{self.__class__.__name__} run(), step5") with step5.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify(image_type="negate") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step5.add_log(LogSeverity.INFO, f"{self.test_id} : Update Verification Completed") else: step5.add_log(LogSeverity.INFO, f"{self.test_id} : Update Verification Failed") - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_unsigned_bundle_update.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_unsigned_bundle_update.py index bdc2b50..33a4518 100644 --- 
a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_unsigned_bundle_update.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_unsigned_bundle_update.py @@ -7,6 +7,7 @@ :Test ID: F90 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case is a negative test. It searches for GPU_FW_IMAGE_UNSIGNED_BUNDLE referenced by package_info.json. If the bundle is not provided, it will modify GPU_FW_IMAGE (golden fwpkg) for this test. Then it will attempt @@ -38,7 +39,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -65,6 +66,7 @@ class CTAMTestNegativeUnsignedBundleUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -105,7 +107,7 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(partial=1, image_type="unsigned_bundle") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, @@ -116,7 +118,7 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Staging Initiated - Unexpected", ) - failure_reason += " " + "FW Update Staging Initiated - Unexpected" + failure_reason = "FW Update Staging Initiated - Unexpected" result = False diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_unsigned_component_image_update.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_unsigned_component_image_update.py index ff75ba1..8a9f5e1 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_unsigned_component_image_update.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_negative_unsigned_component_image_update.py @@ -7,6 +7,7 @@ :Test ID: F18 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case is a negative test. It searches for GPU_FW_IMAGE_UNSIGNED referenced by package_info.json and attempts a firmware update using the unsigned image. 
@@ -37,7 +38,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -64,6 +65,7 @@ class CTAMTestNegativeUnsignedImageUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Negative", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -104,7 +106,7 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(partial=1, image_type="unsigned_component_image") - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, @@ -115,7 +117,7 @@ def run(self) -> TestResult: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Staging Initiated - Unexpected", ) - failure_reason += " FW Update Staging Initiated - Unexpected" + failure_reason = "FW Update Staging Initiated - Unexpected" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N/ctam_test_single_device_update_ping_pong.py b/ctam/tests/fw_update/fw_update_group_N/ctam_test_single_device_update_ping_pong.py index 4648614..0473573 100644 --- a/ctam/tests/fw_update/fw_update_group_N/ctam_test_single_device_update_ping_pong.py +++ b/ctam/tests/fw_update/fw_update_group_N/ctam_test_single_device_update_ping_pong.py @@ -7,6 +7,7 @@ :Test ID: F89 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case entails performing firmware updates in a loop, one device at a time but using different versions for each update.The test flow should go from N to N-1 image during the firmware update process.
@@ -37,7 +38,7 @@ Optional - """ import ast -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -63,6 +64,7 @@ class CTAMTestSingleDeviceUpdatePingPong(TestCase): score_weight: int = 10 tags: List[str] = ["L3", "Single_Device"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupN): """ @@ -106,7 +108,7 @@ def run(self) -> TestResult: step1.add_log(LogSeverity.INFO, f"{self.test_id} : Single Device Selected") else: step1.add_log(LogSeverity.ERROR, f"{self.test_id} : Single Device Selection Failed") - failure_reason += f"{self.test_id} : Single Device Selection Failed" + failure_reason += "Single Device Selection Failed" result = False image_t = "default" if i % 2 == 0 else "backup" @@ -125,24 +127,24 @@ def run(self) -> TestResult: with step3.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw( partial=1, image_type=image_t, specific_targets=self.specific_targets) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step3.add_log(LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed") - failure_reason += " FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4_{i}") # type: ignore with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Activate") else: step4.add_log(LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed") - failure_reason += " FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: @@ -154,7 +156,7 @@ def run(self) -> TestResult: step5.add_log(LogSeverity.INFO, f"{self.test_id} : Update Verification Completed") else: step5.add_log(LogSeverity.INFO, f"{self.test_id} : Update Verification Failed") - failure_reason += " Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_full_device_update.py b/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_full_device_update.py index 3fd586f..059ff6a 100644 --- a/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_full_device_update.py +++ b/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_full_device_update.py @@ -7,6 +7,7 @@ :Test ID: F1 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: Basic test case of full firmware update. This test case verifies the successful execution of a full firmware update process.
@@ -36,7 +37,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -63,6 +64,7 @@ class CTAMTestFullDeviceUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["Compliance", "L0"] compliance_level: str = "L0" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupNMinus1): """ @@ -92,7 +94,7 @@ def run(self) -> TestResult: step1 = self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore with step1.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck() - failure_reason += status_msg + failure_reason = status_msg if not status: step1.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Capable") else: @@ -103,21 +105,21 @@ def run(self) -> TestResult: step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2") # type: ignore with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(is_force_update=False) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step2.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " " + "FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore with step3.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -127,14 +129,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " " + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4") with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -144,7 +146,7 @@ def run(self) -> TestResult: step4.add_log( LogSeverity.INFO, f"{self.test_id} : Update Verification Failed" ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_install_same_image_two_times.py b/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_install_same_image_two_times.py index ff09292..2826ad3 100644 --- a/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_install_same_image_two_times.py +++ b/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_install_same_image_two_times.py @@ -7,6 +7,7 @@ :Test ID: F16 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case involves performing a full device update including verification followed by repeating the update process again. 
@@ -38,7 +39,7 @@ """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -65,6 +66,7 @@ class CTAMTestInstallSameImageTwoTimes(TestCase): score_weight: int = 10 tags: List[str] = ["L1"] compliance_level: str = "L1" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupNMinus1): """ @@ -113,7 +115,7 @@ def run(self) -> TestResult: break with step2.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step2.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Staged" @@ -123,14 +125,14 @@ def run(self) -> TestResult: LogSeverity.FATAL, f"{self.test_id} : FW Update Staged Failed", ) - failure_reason += " " + "FW Update Staged Failed" + failure_reason = "FW Update Staged Failed" result = False if result: step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore with step3.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -140,14 +142,14 @@ def run(self) -> TestResult: LogSeverity.FATAL, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += "" + "FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4") with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, @@ -158,7 +160,7 @@ def run(self) -> TestResult: LogSeverity.FATAL, f"{self.test_id} : Update Verification Failed", ) - failure_reason += " " + "Update Verification Failed" + failure_reason = "Update Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_single_device_update.py b/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_single_device_update.py index 3d22ffa..d596024 100644 --- a/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_single_device_update.py +++ b/ctam/tests/fw_update/fw_update_group_N_1/ctam_test_single_device_update.py @@ -7,6 +7,7 @@ :Test ID: F4 :Group Name: fw_update :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: Basic test case of Single Device firmware update. All updatable devices are updated and activated, one device at a time. Any device fail would lead to test case fail. 
@@ -37,7 +38,7 @@ Optional - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -63,6 +64,7 @@ class CTAMTestSingleDeviceUpdate(TestCase): score_weight: int = 10 tags: List[str] = ["L3", "Single_Device"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: FWUpdateTestGroupNMinus1): """ @@ -101,13 +103,13 @@ def run(self) -> TestResult: step1.add_log(LogSeverity.INFO, f"{self.test_id} : Single Device Selected") else: step1.add_log(LogSeverity.ERROR, f"{self.test_id} : Single Device Selection Failed") - failure_reason += f"{self.test_id} : Single Device Selection Failed" + failure_reason += "Single Device Selection Failed" result = False step2 = self.test_run().add_step(f"{self.__class__.__name__} run(), step2_{device}") # type: ignore with step2.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_precheck() - failure_reason += " " + status_msg + failure_reason = status_msg if not status: step2.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Capable") else: @@ -118,21 +120,21 @@ def run(self) -> TestResult: step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3_{device}") # type: ignore with step3.scope(): status, status_msg, task_id = self.group.fw_update_ifc.ctam_stage_fw(partial=1, specific_targets=[device]) - failure_reason += " " + status_msg + failure_reason = status_msg if status: step3.add_log(LogSeverity.INFO, f"{self.test_id} : FW Update Staged") else: step3.add_log( LogSeverity.ERROR, f"{self.test_id} : FW Update Stage Failed" ) - failure_reason += " FW Update Stage Failed" + failure_reason = "FW Update Stage Failed" result = False if result: step4 = self.test_run().add_step(f"{self.__class__.__name__} run(), step4_{device}") # type: ignore with step4.scope(): status, status_msg = self.group.fw_update_ifc.ctam_activate_ac() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step4.add_log( LogSeverity.INFO, f"{self.test_id} : FW Update Activate" @@ -142,14 +144,14 @@ def run(self) -> TestResult: LogSeverity.ERROR, f"{self.test_id} : FW Update Activation Failed", ) - failure_reason += " FW Update Activation Failed" + failure_reason = "FW Update Activation Failed" result = False if result: step5 = self.test_run().add_step(f"{self.__class__.__name__} run(), step5_{device}") with step5.scope(): status, status_msg = self.group.fw_update_ifc.ctam_fw_update_verify() - failure_reason += " " + status_msg + failure_reason = status_msg if status: step5.add_log( LogSeverity.INFO, @@ -159,7 +161,7 @@ def run(self) -> TestResult: step5.add_log( LogSeverity.INFO, f"{self.test_id} : Update Verification Failed" ) - failure_reason += " Update Verification Failed" + failure_reason = "Update Verification Failed" result = False if result: @@ -173,11 +175,11 @@ def run(self) -> TestResult: step6.add_log(LogSeverity.INFO, f"{failed_devices} : Failed to update devices") if len(failed_devices) !=0: result = False - failure_reason += " " + "Failed to update devices" + failure_reason = "Failed to update devices" else: step6 = self.test_run().add_step(f"{self.__class__.__name__} run(), step6") step6.add_log(LogSeverity.INFO, f"{self.test_id} : No updatable devices, exiting") - failure_reason += "No updatable devices, exiting" + failure_reason = "No updatable devices, exiting" result = False # ensure setting of self.result and self.score prior to calling super().run() diff
--git a/ctam/tests/health_check/basic_health_check_group/ctam_test_logservice_dump_uri_list_read.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_logservice_dump_uri_list_read.py index 4de2fd6..9bda8e4 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_logservice_dump_uri_list_read.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_logservice_dump_uri_list_read.py @@ -7,6 +7,7 @@ :Test ID: H97 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test verifies the ability to read the list of dump URIs from the LogService. It performs the following steps: @@ -29,7 +30,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from pprint import pprint from ocptv.output import ( @@ -56,6 +57,7 @@ class CTAMTestLogServiceDumpURIListRead(TestCase): score_weight: int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: BasicHealthCheckTestGroup): """ @@ -89,7 +91,7 @@ def run(self) -> TestResult: LogSeverity.FATAL, f"{self.test_id} : Redfish LogService Dump URI list Read Failed - Dump list is empty", ) - failure_reason += "Redfish LogService Dump URI list Read Failed - Dump list is empty" + failure_reason += "Dump URI list read failed" result = False else: #pprint(dump_uri) @@ -106,7 +108,7 @@ def run(self) -> TestResult: step2.add_log(LogSeverity.INFO, f"{self.test_id} : Redfish LogService Dump URI list Verification - Passed") else: step2.add_log(LogSeverity.ERROR,f"{self.test_id} : Redfish LogService Dump URI list Verification - Failed") - failure_reason += "Redfish LogService Dump URI list Verification - Failed" + failure_reason = "Dump URI list verification failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_logservice_uri_list_read.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_logservice_uri_list_read.py index 0f61e77..9f13995 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_logservice_uri_list_read.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_logservice_uri_list_read.py @@ -24,7 +24,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from pprint import pprint from ocptv.output import ( @@ -52,6 +52,7 @@ class CTAMTestLogServicesURIListRead(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: BasicHealthCheckTestGroup): """ @@ -85,7 +86,7 @@ def run(self) -> TestResult: LogSeverity.FATAL, f"{self.test_id} : Redfish LogService URI list Read Failed - LogService list is empty", ) - failure_reason += "Redfish LogService URI list Read Failed - LogService list is empty" + failure_reason += "LogService URI list read failed" result = False else: #pprint(logservice) @@ -101,7 +102,7 @@ def run(self) -> TestResult: step2.add_log(LogSeverity.INFO, f"{self.test_id} : Redfish LogService URI list Verification - Passed") else: step2.add_log(LogSeverity.ERROR,f"{self.test_id} : Redfish LogService URI list Verification - Failed") - failure_reason += "Redfish LogService URI list Verification - Failed" + failure_reason += "LogService URI list verification failed"
result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service.py index dd6211a..acde5a8 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service.py @@ -7,6 +7,7 @@ :Test ID: H8 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case attempts to get the event service and verifies its presence. @@ -22,7 +23,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -49,6 +50,7 @@ class CTAMTestRedfishEventService(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_create_subscription.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_create_subscription.py index 1a239d3..f92bd41 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_create_subscription.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_create_subscription.py @@ -7,6 +7,7 @@ :Test ID: H83 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case attempts to create an event service subscription and verifies its success. @@ -22,7 +23,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -49,6 +50,7 @@ class CTAMTestRedfishEventServiceCreateSubscription(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] @@ -86,7 +88,7 @@ def run(self) -> TestResult: Protocol="Redfish") if JSONData is None or "error" in JSONData: step1.add_log(LogSeverity.ERROR, f"{self.test_id} : Redfish Event Service Check - Failed") - failure_reason += "Redfish Event Service Check - Failed" + failure_reason += "Event Service subscription creation failed" result = False else: step1.add_log(LogSeverity.INFO, f"{self.test_id} : Redfish Event Service Check - Completed") diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_delete_subscription.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_delete_subscription.py index 056b055..b9f7479 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_delete_subscription.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_delete_subscription.py @@ -7,6 +7,7 @@ :Test ID: H81 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case attempts to delete event subscriptions and verifies its success. 
@@ -22,7 +23,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -49,6 +50,7 @@ class CTAMTestRedfishEventServiceDeleteSubscription(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] @@ -82,7 +84,7 @@ def run(self) -> TestResult: result = self.group.health_check_ifc.ctam_deles() if result is False: step1.add_log(LogSeverity.ERROR, f"{self.test_id} : Redfish Event Service Delete Subscriptions Check - Failed") - failure_reason += "Redfish Event Service Delete Subscriptions Check - Failed" + failure_reason += "Event Service subscription deletion failed" else: step1.add_log(LogSeverity.INFO, f"{self.test_id} : Redfish Event Service Delete Subscriptions Check - Completed") diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_list_subscription.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_list_subscription.py index a3165fa..bb33306 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_list_subscription.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_event_service_list_subscription.py @@ -7,6 +7,7 @@ :Test ID: H80 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case attempts to get event subscriptions and verifies their presence. @@ -22,7 +23,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -49,6 +50,7 @@ class CTAMTestRedfishEventServiceSubscription(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] @@ -82,7 +84,7 @@ def run(self) -> TestResult: JSONData = self.group.health_check_ifc.ctam_getes("Subscriptions") if JSONData is None or len(JSONData) == 0: step1.add_log(LogSeverity.ERROR, f"{self.test_id} : Redfish Event Service Subscriptions Check - Failed") - failure_reason += "Redfish Event Service Subscriptions Check - Failed" + failure_reason += "Event Service subscriptions retrieval failed" result = False else: step1.add_log(LogSeverity.INFO, f"{self.test_id} : Redfish Event Service Subscriptions Check - Completed") diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_firmware_inventory_collection.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_firmware_inventory_collection.py index 812a8cd..1e23446 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_firmware_inventory_collection.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_firmware_inventory_collection.py @@ -7,6 +7,7 @@ :Test ID: H5 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case validates the Firmware Inventory using the Redfish Interop Validator (RIV). It ensures that the values of the Firmware Inventory Collection are present and correct. 
@@ -22,7 +23,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from test_hierarchy import TestHierarchy import os @@ -37,6 +38,7 @@ BasicHealthCheckTestGroup, ) from utils.ctam_utils import GitUtils +from interfaces.functional_ifc import FunctionalIfc class CTAMTestRedfishInteropValidatorFirmwareInventory(TestCase): """ @@ -51,6 +53,7 @@ class CTAMTestRedfishInteropValidatorFirmwareInventory(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L1"] compliance_level: str ="L1" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] @@ -80,7 +83,8 @@ def run(self) -> TestResult: """ failure_reason = "" result = True - logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator", f"{self.__class__.test_id}_{self.__class__.__name__}") + logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator") + # logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator", f"{self.__class__.test_id}_{self.__class__.__name__}") #cloning Redfish Interop Validator under temp folder which will be deleted after completion of test case. step1 = self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore @@ -106,6 +110,7 @@ def run(self) -> TestResult: payload="NodeTree /redfish/v1/UpdateService/FirmwareInventory", profile=json_file_path, ) + self.group.health_check_ifc.flatten_validator_output(self.__class__.test_id, self.__class__.__name__, logger_path) if not result: step2.add_log(LogSeverity.ERROR, f"Validation has failed: {error_count} problems found") failure_reason += f"Validation has failed: {error_count} problems found" @@ -119,8 +124,8 @@ def run(self) -> TestResult: # call super last to log result and score super().run() - return self.result, failure_reason - + return self.result, failure_reason + def teardown(self): """ undo environment state change from setup() above, this function is called even if run() fails or raises exception diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_firmware_inventory_expanded_collection.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_firmware_inventory_expanded_collection.py index 898659b..025bef0 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_firmware_inventory_expanded_collection.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_firmware_inventory_expanded_collection.py @@ -7,6 +7,7 @@ :Test ID: H6 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case attempts to get the expanded firmware inventory from the update service and verifies its presence and correctness. 
@@ -24,7 +25,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -51,6 +52,7 @@ class CTAMTestRedfishFirmwareInventoryExpandedCollection(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] @@ -84,7 +86,7 @@ def run(self) -> TestResult: JSONData = self.group.health_check_ifc.ctam_getfi(expanded=1) if JSONData is None or "error" in JSONData: step1.add_log(LogSeverity.ERROR, f"{self.test_id} : Redfish FW Inventory Expanded Collection Read - Failed") - failure_reason += "Redfish FW Inventory Expanded Collection Read - Failed" + failure_reason += "Expanded Collection Read Failed" result = False else: step1.add_log(LogSeverity.INFO, f"{self.test_id} : Redfish FW Inventory Expanded Collection Read - Completed") @@ -96,7 +98,7 @@ def run(self) -> TestResult: step2.add_log(LogSeverity.INFO, f"{self.test_id} : Redfish FW Inventory Expanded Collection Verification - Passed") else: step2.add_log(LogSeverity.ERROR,f"{self.test_id} : Redfish FW Inventory Expanded Collection Verification - Failed") - failure_reason += "Redfish FW Inventory Expanded Collection Verification - Failed" + failure_reason += "Expanded Collection Verification Failed" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_processors_expanded_collection.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_processors_expanded_collection.py index 27b3993..1ffb42f 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_processors_expanded_collection.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_processors_expanded_collection.py @@ -7,6 +7,7 @@ :Test ID: H51 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case attempts to get the expanded Processor inventory from the update service and verifies its presence and correctness. @@ -28,7 +29,7 @@ : Required - Optional - None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -55,6 +56,7 @@ class CTAMTestRedfishProcessorExpandedCollection(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_software_inventory_expanded_collection.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_software_inventory_expanded_collection.py index 9b0bd84..bd6670e 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_software_inventory_expanded_collection.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_software_inventory_expanded_collection.py @@ -7,6 +7,7 @@ :Test ID: H11 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case attempts to get the expanded Software inventory from the update service and verifies its presence and correctness. 
@@ -24,7 +25,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -51,6 +52,7 @@ class CTAMTestRedfishSoftwareInventoryExpandedCollection(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_task_service.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_task_service.py index c7a506a..8e9e680 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_task_service.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_task_service.py @@ -7,6 +7,7 @@ :Test ID: H10 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case attempts to get the task service and verifies its presence. @@ -22,7 +23,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -49,6 +50,7 @@ class CTAMTestRedfishTaskService(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_telemetry_service.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_telemetry_service.py index 358b2c7..d87f9ee 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_telemetry_service.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_telemetry_service.py @@ -7,6 +7,7 @@ :Test ID: H7 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test verifies the availability and correctness of the Redfish Telemetry Service. The test attempts to retrieve telemetry data from the service and checks for errors. @@ -20,7 +21,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -47,6 +48,7 @@ class CTAMTestRedfishTelemetryService(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] diff --git a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_update_service.py b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_update_service.py index 8454e89..8c5557a 100644 --- a/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_update_service.py +++ b/ctam/tests/health_check/basic_health_check_group/ctam_test_redfish_update_service.py @@ -7,6 +7,7 @@ :Test ID: H4 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test verifies the availability and correctness of the Redfish Update Service. The test attempts to retrieve update service data from the service and checks for errors. 
@@ -20,7 +21,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from test_hierarchy import TestHierarchy import os @@ -35,6 +36,7 @@ BasicHealthCheckTestGroup, ) from utils.ctam_utils import GitUtils +from interfaces.health_check_ifc import HealthCheckIfc class CTAMTestRedfishUpdateService(TestCase): @@ -50,6 +52,7 @@ class CTAMTestRedfishUpdateService(TestCase): score_weight: int = 10 tags: List[str] = ["HCheck", "L2"] compliance_level: str ="L2" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] @@ -79,7 +82,8 @@ def run(self) -> TestResult: """ failure_reason = "" result = True - logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator", f"{self.__class__.test_id}_{self.__class__.__name__}") + logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator") + # logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator", f"{self.__class__.test_id}_{self.__class__.__name__}") #cloning Redfish Interop Validator under temp folder which will be deleted after completion of test case. step1 = self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore @@ -105,6 +109,7 @@ def run(self) -> TestResult: payload="NodeTree /redfish/v1/UpdateService", profile=json_file_path, ) + self.group.health_check_ifc.flatten_validator_output(self.__class__.test_id, self.__class__.__name__, logger_path) if not result: step2.add_log(LogSeverity.ERROR, f"Validation has failed: {error_count} problems found") failure_reason += f"Validation has failed: {error_count} problems found" diff --git a/ctam/tests/health_check/long_health_check_group/ctam_test_ac_cycles_in_loop.py b/ctam/tests/health_check/long_health_check_group/ctam_test_ac_cycles_in_loop.py index 99298a0..1396394 100644 --- a/ctam/tests/health_check/long_health_check_group/ctam_test_ac_cycles_in_loop.py +++ b/ctam/tests/health_check/long_health_check_group/ctam_test_ac_cycles_in_loop.py @@ -7,6 +7,7 @@ :Test ID: H100 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: AC Cycle is essential for activation flow of firmware update and many other flows. This test runs AC cycles in a loop to test platform stability. 
This is a prerequisite @@ -29,7 +30,7 @@ Optional - , """ -from typing import List +from typing import List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -52,6 +53,7 @@ class CTAMTestAcCyclesInLoop(TestCase): score_weight:int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: LongHealthCheckTestGroup): """ @@ -87,7 +89,7 @@ def run(self) -> TestResult: else: msg = f"{self.test_id} : AC Cycle Failed Loop {i}" self.test_run().add_log(LogSeverity.DEBUG, msg) - failure_reason += msg + failure_reason += f"AC Cycle Failed Loop {i}" result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/tests/health_check/long_health_check_group/ctam_test_logservice_dump_clearlog.py b/ctam/tests/health_check/long_health_check_group/ctam_test_logservice_dump_clearlog.py index 9673249..4c928ba 100644 --- a/ctam/tests/health_check/long_health_check_group/ctam_test_logservice_dump_clearlog.py +++ b/ctam/tests/health_check/long_health_check_group/ctam_test_logservice_dump_clearlog.py @@ -7,6 +7,7 @@ :Test ID: H96 :Group Name: health_check :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: Basic test case to clear all entries of all instances of LogService Dumps. This test ensures that all log entries are cleared successfully. @@ -20,7 +21,7 @@ :Dependencies: None """ -from typing import List +from typing import List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -45,6 +46,7 @@ class CTAMTestLogserviceDumpClearlog(TestCase): score_weight:int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: LongHealthCheckTestGroup): """ diff --git a/ctam/tests/ras/ctam_test_collect_crashdump_manager.py b/ctam/tests/ras/ctam_test_collect_crashdump_manager.py index 2e32f10..3e01610 100644 --- a/ctam/tests/ras/ctam_test_collect_crashdump_manager.py +++ b/ctam/tests/ras/ctam_test_collect_crashdump_manager.py @@ -7,6 +7,7 @@ :Test ID: R2 :Group Name: ras :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test verifies the functionality of collecting crashdump diagnostic data for a specific manager in the system. It performs the following steps: @@ -38,7 +39,7 @@ : Required - """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -65,6 +66,7 @@ class CTAMTestCollectCrashdumpManager(TestCase): score_weight: int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] diff --git a/ctam/tests/ras/ctam_test_discover_crashdump.py b/ctam/tests/ras/ctam_test_discover_crashdump.py index eef1130..c033736 100644 --- a/ctam/tests/ras/ctam_test_discover_crashdump.py +++ b/ctam/tests/ras/ctam_test_discover_crashdump.py @@ -7,6 +7,7 @@ :Test ID: R1 :Group Name: ras :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case ensures that there is at least one LogService ID under both Systems and Managers. It then checks if at least one of them has the LogService.CollectDiagnosticData action available. 
The test returns a list of URIs @@ -27,7 +28,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from pprint import pprint from ocptv.output import ( @@ -55,6 +56,7 @@ class CTAMTestDiscoverCrashdump(TestCase): score_weight: int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" # exclude_tags: List[str] = ["NotCheck"] @@ -90,7 +92,7 @@ def run(self) -> TestResult: LogSeverity.FATAL, f"{self.test_id} : Test case Failed - CollectDiagnostic list is empty", ) - failure_reason += "Test case Failed - CollectDiagnostic list is empty." + failure_reason += "CollectDiagnostic list is empty." result = False else: print(collectdata) diff --git a/ctam/tests/telemetry/basic_telemetry_group/ctam_test_redfish_interop_validator.py b/ctam/tests/telemetry/basic_telemetry_group/ctam_test_redfish_interop_validator.py index a1a4755..f445309 100644 --- a/ctam/tests/telemetry/basic_telemetry_group/ctam_test_redfish_interop_validator.py +++ b/ctam/tests/telemetry/basic_telemetry_group/ctam_test_redfish_interop_validator.py @@ -7,6 +7,7 @@ :Test ID: T97 :Group Name: Telemetry :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case will clone the Redfish Interop Validator (RIV) in a temporary folder and take JSON profiles as input. It will then run the RIV using these profiles to validate the Redfish service. @@ -24,9 +25,10 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from test_hierarchy import TestHierarchy +from interfaces.telemetry_ifc import TelemetryIfc import os from ocptv.output import ( DiagnosisType, @@ -52,6 +54,7 @@ class CTAMTestRedfishInteropValidator(TestCase): score_weight: int = 10 tags: List[str] = ["L0"] compliance_level: str = "L0" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: BasicTelemetryTestGroup): """ @@ -79,7 +82,8 @@ def run(self) -> TestResult: """ result = True failure_reason = "" - logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator", f"{self.__class__.test_id}_{self.__class__.__name__}") + logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator") + # logger_path = os.path.join(self.dut().logger_path, "RedfishInteropValidator", f"{self.__class__.test_id}_{self.__class__.__name__}") #cloning Redfish Interop Validator under temp folder which will be deleted after completion of test case. 
step1 = self.test_run().add_step(f"{self.__class__.__name__} run(), step1") # type: ignore @@ -104,6 +108,7 @@ def run(self) -> TestResult: log_path=logger_path, passthrough=base_uri, profile=json_file_path ) + self.group.telemetry_ifc.flatten_validator_output(self.__class__.test_id, self.__class__.__name__, logger_path) if not result: step2.add_log(LogSeverity.ERROR, f"Validation has failed: {error_count} problems found") failure_reason += f"Validation has failed: {error_count} problems found" diff --git a/ctam/tests/telemetry/basic_telemetry_group/ctam_test_redfish_service_validator.py b/ctam/tests/telemetry/basic_telemetry_group/ctam_test_redfish_service_validator.py index b1aee05..e451923 100644 --- a/ctam/tests/telemetry/basic_telemetry_group/ctam_test_redfish_service_validator.py +++ b/ctam/tests/telemetry/basic_telemetry_group/ctam_test_redfish_service_validator.py @@ -7,6 +7,7 @@ :Test ID: T0 :Group Name: Telemetry :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This testcase will clone the RedfishServiceValidator repository and It will validate all of the available URIs under redfish. @@ -25,7 +26,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase import os from ocptv.output import ( @@ -47,11 +48,12 @@ class CTAMTestServiceValidator(TestCase): :type TestCase: """ - test_name: str = "CTAM Test Service Validator" + test_name: str = "CTAM Test Redfish Service Validator" test_id: str = "T0" score_weight: int = 10 tags: List[str] = ["L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: BasicTelemetryTestGroup): """ @@ -88,7 +90,7 @@ def run(self) -> TestResult: repo_path="RedfishServiceValidator") if not result: step1.add_log(LogSeverity.ERROR, f"Cloning repo for Redfish Service Validator failed.") - failure_reason += "Cloning repo for Redfish Service Validator failed. " + failure_reason += "Cloning repo failed. " step1.add_log(LogSeverity.INFO, f"Cloning repo for Redfish Service Validator successful.") if result: @@ -109,7 +111,7 @@ def run(self) -> TestResult: service_uri="/redfish/v1") if not result: step2.add_log(LogSeverity.ERROR, f"Something went wrong while running redfish command. Please see error msg {msg}.") - failure_reason += f"Something went wrong while running redfish command. Please see error msg {msg}." + failure_reason += "Error occurred running Redfish command." 
step2.add_log(LogSeverity.INFO, f"Redfish Service Command ran successfully and validated.") step3 = self.test_run().add_step(f"{self.__class__.__name__} run(), step3") # type: ignore diff --git a/ctam/tests/telemetry/basic_telemetry_group/ctam_test_telemetry_mr_list_read.py b/ctam/tests/telemetry/basic_telemetry_group/ctam_test_telemetry_mr_list_read.py index 3f5b442..f1915c0 100644 --- a/ctam/tests/telemetry/basic_telemetry_group/ctam_test_telemetry_mr_list_read.py +++ b/ctam/tests/telemetry/basic_telemetry_group/ctam_test_telemetry_mr_list_read.py @@ -25,7 +25,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from ocptv.output import ( DiagnosisType, @@ -45,6 +45,7 @@ class CTAMTestTelemetryMRListRead(TestCase): score_weight: int = 10 tags: List[str] = ["L3"] compliance_level: str = "L3" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: BasicTelemetryTestGroup): """ diff --git a/ctam/tests/telemetry/basic_telemetry_group/ctam_test_telemetry_mr_read.py b/ctam/tests/telemetry/basic_telemetry_group/ctam_test_telemetry_mr_read.py index 3b389a2..ae403fa 100644 --- a/ctam/tests/telemetry/basic_telemetry_group/ctam_test_telemetry_mr_read.py +++ b/ctam/tests/telemetry/basic_telemetry_group/ctam_test_telemetry_mr_read.py @@ -7,6 +7,7 @@ :Test ID: T4 :Group Name: Telemetry :Score Weight: 10 +:Spec Versions: ">= 1.0" :Description: This test case discovers the list of all metric reports available on the Device Under Test (DUT) and prints their details. It ensures that the telemetry interface can retrieve and display the @@ -25,7 +26,7 @@ :Dependencies: None """ -from typing import Optional, List +from typing import Optional, List, Union from tests.test_case import TestCase from prettytable import PrettyTable from ocptv.output import ( @@ -46,6 +47,7 @@ class CTAMTestTelemetryMRRead(TestCase): score_weight: int = 10 tags: List[str] = ["L2"] compliance_level: str = "L2" + spec_versions: Union[str, List[str]] = ">= 1.0" def __init__(self, group: BasicTelemetryTestGroup): """ @@ -84,7 +86,7 @@ def run(self) -> TestResult: self.test_run().add_log(LogSeverity.INFO, msg) else: self.test_run().add_log(LogSeverity.FATAL, "Could not extract the Metric Reports. Proceed with manual debug") - failure_reason += "Could not extract the Metric Reports. Proceed with manual debug" + failure_reason += "Could not extract Metric Reports." 
result = False # ensure setting of self.result and self.score prior to calling super().run() diff --git a/ctam/utils/ctam_utils.py b/ctam/utils/ctam_utils.py index 2ad0463..c9b96d1 100644 --- a/ctam/utils/ctam_utils.py +++ b/ctam/utils/ctam_utils.py @@ -146,7 +146,7 @@ def validate_redfish_service(self, file_name, connection_url, user_name, user_pa data = result.replace("\r", "").split("\n")[-1] s_idx = result.index("Elapsed time:") data = result[s_idx:] - res = re.findall(r"pass:\s+(\d+)", data) + res = re.findall(r"(?:Pass|pass):\s+(\d+)", data) if res and res[0].isdigit() and int(res[0]) > 0: return True, "PASS" return False, "FAIL" @@ -213,7 +213,7 @@ def ctam_run_dmtf_command(cls, command): """ try: command = repr(command)[1:-1] - with subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) as process: + with subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, encoding='utf-8') as process: # Set up a progress bar; assume you know the number of iterations (like 4 for 4 pings) def read_stream(stream, buffer): @@ -251,6 +251,12 @@ def read_stream(stream, buffer): stdout_thread.join() stderr_thread.join() + # Filter out warning lines from stderr + stderr_lines = [ + line for line in stderr_lines + if "Warning" not in line and "warnings.warn" not in line + ] + if stderr_lines: print(stderr_lines) return False, stderr_lines diff --git a/ctam/version.py b/ctam/version.py index e78a3dd..934c5ba 100644 --- a/ctam/version.py +++ b/ctam/version.py @@ -7,4 +7,4 @@ """ -__version__ = "1.1.1" +__version__ = "1.1.2" diff --git a/json_spec/input/gpu_pldm_pkg_info.json b/json_spec/input/gpu_pldm_pkg_info.json deleted file mode 100644 index a6d4bab..0000000 --- a/json_spec/input/gpu_pldm_pkg_info.json +++ /dev/null @@ -1,408 +0,0 @@ -{ - "PackageHeaderInformation": { - "PackageHeaderIdentifier":, - "PackageHeaderFormatVersion":, - "PackageReleaseDateTime":, - "PackageVersionString": - }, - "FirmwareDeviceRecords": [ - { - "ComponentImageSetVersionString":, - "DeviceDescriptors": [ - { - "InitialDescriptorData":, - "InitialDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - } - ], - "Components": [ - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 182912 - }, - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 67105792, - "AP_SKU_ID": - } - ] - }, - { - "ComponentImageSetVersionString":, - "DeviceDescriptors": [ - { - "InitialDescriptorData":, - "InitialDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - 
"AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - } - ], - "Components": [ - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 182912 - }, - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 32117760, - "AP_SKU_ID": - } - ] - }, - { - "ComponentImageSetVersionString":, - "DeviceDescriptors": [ - { - "InitialDescriptorData":, - "InitialDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - } - ], - "Components": [ - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 182912, - "UpdateOutband":, - "UpdateInband":, - "SubIngredientVersions": [ - { - "ingredientname1": - }, - { - "ingredientname2": - } - ] - }, - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 975872, - "AP_SKU_ID":, - "UpdateOutband":, - "UpdateInband":, - "SubIngredientVersions": [ - { - "ingredientname1": - }, - { - "ingredientname2": - } - ] - } - ] - }, - { - "ComponentImageSetVersionString":, - "DeviceDescriptors": [ - { - "InitialDescriptorData":, - "InitialDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - } - ], - "Components": [ - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 182912, - "UpdateOutband":, - "UpdateInband":, - "SubIngredientVersions": [ - { - "ingredientname1": - }, - { - "ingredientname2": - } - ] - }, - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 975872, - "AP_SKU_ID":, - "UpdateOutband":, - "UpdateInband":, - "SubIngredientVersions": [ - { - "ingredientname1": - }, - { - "ingredientname2": - } - ] - } - ] - }, - { - "ComponentImageSetVersionString":, - "DeviceDescriptors": [ - { - 
"InitialDescriptorData":, - "InitialDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "AdditionalDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - } - ], - "Components": [ - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 182912, - "UpdateOutband":, - "UpdateInband":, - "SubIngredientVersions": [ - { - "ingredientname1": - }, - { - "ingredientname2": - } - ] - }, - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 4461824, - "AP_SKU_ID":, - "UpdateOutband":, - "UpdateInband":, - "SubIngredientVersions": [ - { - "ingredientname1": - }, - { - "ingredientname2": - } - ] - } - ] - }, - { - "ComponentImageSetVersionString":, - "DeviceDescriptors": [ - { - "InitialDescriptorData":, - "InitialDescriptorType": - }, - { - "VendorDefinedDescriptorTitleString":, - "VendorDefinedDescriptorData":, - "AdditionalDescriptorType": - } - ], - "Components": [ - { - "ComponentIdentifier":, - "ComponentVersionString":, - "FWImage":, - "FWImageSHA256":, - "FWImageSize": 262144, - "UpdateOutband":, - "UpdateInband":, - "SubIngredientVersions": [ - { - "ingredientname1": - }, - { - "ingredientname2": - } - ] - } - ] - } - ] -} \ No newline at end of file diff --git a/json_spec/input/FirmwareInventory.json b/json_spec/input/spec_1.0/FirmwareInventory.json similarity index 100% rename from json_spec/input/FirmwareInventory.json rename to json_spec/input/spec_1.0/FirmwareInventory.json diff --git a/json_spec/input/OCP_UBB_BaselineManagement.v1.0.0.json b/json_spec/input/spec_1.0/OCP_UBB_BaselineManagement.v1.0.0.json similarity index 100% rename from json_spec/input/OCP_UBB_BaselineManagement.v1.0.0.json rename to json_spec/input/spec_1.0/OCP_UBB_BaselineManagement.v1.0.0.json diff --git a/json_spec/input/UpdateService.json b/json_spec/input/spec_1.0/UpdateService.json similarity index 100% rename from json_spec/input/UpdateService.json rename to json_spec/input/spec_1.0/UpdateService.json diff --git a/json_spec/input/spec_1.0/spec_bindings.json b/json_spec/input/spec_1.0/spec_bindings.json new file mode 100644 index 0000000..209013d --- /dev/null +++ b/json_spec/input/spec_1.0/spec_bindings.json @@ -0,0 +1,5 @@ +{ +"FWUpdate":"1.1", +"Interfaces":"1.4", +"RAS":"1.7" +} \ No newline at end of file diff --git a/json_spec/input/.netrc b/json_spec/input/spec_1.0/workspace/.netrc similarity index 100% rename from json_spec/input/.netrc rename to json_spec/input/spec_1.0/workspace/.netrc diff --git a/json_spec/input/dut_info.json b/json_spec/input/spec_1.0/workspace/dut_info.json similarity index 100% rename from json_spec/input/dut_info.json rename to json_spec/input/spec_1.0/workspace/dut_info.json diff --git a/json_spec/input/package_info.json b/json_spec/input/spec_1.0/workspace/package_info.json similarity index 100% rename from json_spec/input/package_info.json rename to 
json_spec/input/spec_1.0/workspace/package_info.json diff --git a/json_spec/input/redfish_response_messages.json b/json_spec/input/spec_1.0/workspace/redfish_response_messages.json similarity index 100% rename from json_spec/input/redfish_response_messages.json rename to json_spec/input/spec_1.0/workspace/redfish_response_messages.json diff --git a/json_spec/input/redfish_uri_config.json b/json_spec/input/spec_1.0/workspace/redfish_uri_config.json similarity index 100% rename from json_spec/input/redfish_uri_config.json rename to json_spec/input/spec_1.0/workspace/redfish_uri_config.json diff --git a/json_spec/input/test_runner.json b/json_spec/input/spec_1.0/workspace/test_runner.json similarity index 73% rename from json_spec/input/test_runner.json rename to json_spec/input/spec_1.0/workspace/test_runner.json index b6455a3..9b7bde3 100644 --- a/json_spec/input/test_runner.json +++ b/json_spec/input/spec_1.0/workspace/test_runner.json @@ -3,6 +3,7 @@ "$id": "Need to add one", "title": "Test Runner Configuration", "description": "Configure the TestRunner obj, list of test cases overrides a test suite", + "spec_version": "1.0", "output_override_directory": "", "debug_mode": true, "console_log": true, @@ -20,10 +21,10 @@ }, "active_test_suite": [], "dev_test_suite": [], - "full_compliance_test_suite": [], + "full_compliance_test_suite": ["F0", "PROF", "F1", "PROF", "T97", "H5", "F8", "PROF", "F16", "PROF", "F88", "PROF", "F63", "F5", "F64", "F2", "F18", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "H83", "H81", "H8", "H6", "H4", "T0", "T4"], "regression_test_suite_0": ["F0","F1", "F63", "F64", "F8", "F88", "T0"], "regression_test_suite_1": ["F0", "F1", "F23", "F26", "F28", "F27", "F22", "F25", "F18", "F32"], "regression_test_suite_2": ["F0", "F1", "F16", "F19", "F55", "F62", "F89"], - "regression_test_suite_3": ["F0","F1", "F8", "F16", "F18", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "F32", "F55", "F63", "F64", "F88", "R1", "R2", "R3", "T0", "T2", "T4"], + "regression_test_suite_3": ["F0","F1", "F8", "F16", "F18", "F22", "F23", "PROF", "F24", "F25", "F26", "F27", "F28", "F32", "F55", "F63", "F64", "F88", "R1", "R2", "T0", "T2", "T4"], "test_uri_response_excel":"Excel file name for checking the response" } \ No newline at end of file diff --git a/json_spec/input/spec_1.1/FirmwareInventory.json b/json_spec/input/spec_1.1/FirmwareInventory.json new file mode 100644 index 0000000..f6c8fd1 --- /dev/null +++ b/json_spec/input/spec_1.1/FirmwareInventory.json @@ -0,0 +1,77 @@ +{ + "SchemaDefinition": "RedfishInteroperabilityProfile.v1_6_0", + "ProfileName": "OCPGPUManagement", + "ProfileVersion": "1.0.0", + "Purpose": "Specifies the OCP baseline hardware management requirements for the Redfish interface on GPU platforms.", + "OwningEntity": "Open Compute Project", + "ContributedBy": "Google, Meta, Microsoft, NVidia, AMD", + "License": "Creative Commons Attribution-ShareAlike 4.0 International License", + "ContactInfo": "", + "Protocol": { + "MinVersion": "1.0" + }, + "Resources": { + "SoftwareInventory":{ + "MinVersion": "1.0.0", + "ReadRequirement": "Recommended", + "PropertyRequirements":{ + "Id":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "Name":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "Status":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added", + "PropertyRequirements":{ + "State":{ + "ReadRequirement":"Mandatory", + "Purpose": 
"Placeholder for Purpose Comment to be Added", + "Values":["Enabled", "Disabled"] + }, + "Health":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added", + "Values":["OK", "Warning", "Critical"] + } + } + }, + "SoftwareId":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "Manufacturer":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "Updateable":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "WriteProtected":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "Version":{ + "ReadRequirement":"Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + } + } + } + }, + "Registries": { + "Base": { + "MinVersion": "1.0.0", + "Repository": "redfish.dmtf.org/registries", + "Messages": { + "Success": {}, + "GeneralError": {}, + "Created": {}, + "PropertyDuplicate": {} + } + } + } +} \ No newline at end of file diff --git a/json_spec/input/spec_1.1/OCP_UBB_BaselineManagement.v1.0.0.json b/json_spec/input/spec_1.1/OCP_UBB_BaselineManagement.v1.0.0.json new file mode 100644 index 0000000..19143ce --- /dev/null +++ b/json_spec/input/spec_1.1/OCP_UBB_BaselineManagement.v1.0.0.json @@ -0,0 +1,951 @@ +{ + "SchemaDefinition": "RedfishInteroperabilityProfile.v1_6_0", + "ProfileName": "OCP_UBB_BaselineManagement", + "ProfileVersion": "1.0.0", + "Purpose": "Specifies the OCP baseline hardware management requirements for the Redfish interface for GPU based Universal BaseBoard (UBB) platforms.", + "OwningEntity": "Open Compute Project", + "ContributedBy": "AMD, Google, Meta, Microsoft, NVIDIA", + "License": "Creative Commons Attribution-ShareAlike 4.0 International License", + "ContactInfo": "", + "Protocol": { + "MinVersion": "1.0" + }, + "Resources": { + "Assembly": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Assemblies": { + "ReadRequirement": "Mandatory" + } + } + }, + "Certificate": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "CertificateString": { + "ReadRequirement": "Mandatory" + }, + "CertificateType": { + "ReadRequirement": "Mandatory" + }, + "CertificateUsageTypes": { + "ReadRequirement": "Mandatory" + } + } + }, + "Chassis": { + "MinVersion": "1.0.0", + "ReadRequirement": "Supported", + "PropertyRequirements": { + "Assembly": { + "ReadRequirement": "Supported" + }, + "ChassisType": { + "ReadRequirement": "Recommended" + }, + "EnvironmentMetrics": { + "ReadRequirement": "Supported" + }, + "Links": { + "ReadRequirement": "Recommended", + "PropertyRequirements": { + "ComputerSystems": { + "ReadRequirement": "Supported" + }, + "ContainedBy": { + "ReadRequirement": "Supported" + }, + "Contains": { + "ReadRequirement": "Supported" + }, + "ManagedBy": { + "ReadRequirement": "Supported" + }, + "Processors": { + "ReadRequirement": "Supported" + } + } + }, + "Manufacturer": { + "ReadRequirement": "Supported" + }, + "MaxPowerWatts": { + "ReadRequirement": "Supported" + }, + "MinPowerWatts": { + "ReadRequirement": "Supported" + }, + "Model": { + "ReadRequirement": "Supported" + }, + "PCIeDevices": { + "ReadRequirement": "Supported" + }, + "PartNumber": { + "ReadRequirement": "Supported" + }, + "PowerSubsystem": { + "ReadRequirement": "Supported" + }, + "SKU": { + "ReadRequirement": "Supported" + }, + "Sensors": { + "ReadRequirement": "Supported" + }, + 
"SerialNumber": { + "ReadRequirement": "Supported" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Mandatory" + }, + "State": { + "ReadRequirement": "Mandatory" + } + } + }, + "ThermalSubsystem": { + "ReadRequirement": "Supported" + }, + "UUID": { + "ReadRequirement": "Supported" + } + } + }, + "ComputerSystem": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Links": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Chassis": { + "ReadRequirement": "Mandatory" + }, + "ManagedBy": { + "ReadRequirement": "Mandatory" + } + } + }, + "LogServices": { + "ReadRequirement": "Mandatory" + }, + "Memory": { + "ReadRequirement": "Mandatory" + }, + "Processors": { + "ReadRequirement": "Mandatory" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Mandatory" + }, + "State": { + "ReadRequirement": "Mandatory" + } + } + }, + "SystemType": { + "ReadRequirement": "Mandatory" + } + } + }, + "EnvironmentMetrics": { + "MinVersion": "1.0.0", + "ReadRequirement": "Supported", + "PropertyRequirements": { + "EnergyJoules": { + "ReadRequirement": "Supported", + "PropertyRequirements": { + "DataSourceUri": { + "ReadRequirement": "Supported" + }, + "Reading": { + "ReadRequirement": "Supported" + } + } + }, + "PowerLimitWatts": { + "ReadRequirement": "Recommended", + "PropertyRequirements": { + "ControlMode": { + "ReadRequirement": "Recommended" + }, + "SetPoint": { + "ReadRequirement": "Recommended" + } + } + }, + "TemperatureCelsius": { + "ReadRequirement": "Supported", + "PropertyRequirements": { + "DataSourceUri": { + "ReadRequirement": "Supported" + }, + "Reading": { + "ReadRequirement": "Supported" + } + } + } + } + }, + "LogEntry": { + "MinVersion": "1.0.0", + "ReadRequirement": "Supported", + "PropertyRequirements": { + "AdditionalDataURI": { + "ReadRequirement": "Supported" + }, + "Created": { + "ReadRequirement": "Recommended" + }, + "EntryType": { + "ReadRequirement": "Mandatory" + }, + "OriginOfCondition": { + "ReadRequirement": "Supported" + }, + "Message": { + "ReadRequirement": "Recommended" + }, + "MessageArgs": { + "ReadRequirement": "Supported" + }, + "MessageId": { + "ReadRequirement": "Supported" + }, + "Resolution": { + "ReadRequirement": "Supported" + }, + "Severity": { + "ReadRequirement": "Recommended" + } + } + }, + "LogEntryCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Description": { + "ReadRequirement": "Mandatory" + }, + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "LogService": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Description": { + "ReadRequirement": "Mandatory" + }, + "Entries": { + "ReadRequirement": "Recommended" + }, + "MaxNumberOfRecords": { + "ReadRequirement": "Recommended" + }, + "OverWritePolicy": { + "ReadRequirement": "Recommended" + } + } + }, + "LogServiceCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Description": { + "ReadRequirement": "Mandatory" + }, + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "Manager": { + "MinVersion": "1.0.0", + "ReadRequirement": "Recommended", + "PropertyRequirements": { + "DateTime": { + "ReadRequirement": 
"Recommended" + }, + "DateTimeLocalOffset": { + "ReadRequirement": "Recommended" + }, + "Description": { + "ReadRequirement": "Mandatory" + }, + "FirmwareVersion": { + "ReadRequirement": "Recommended" + }, + "LastResetTime": { + "ReadRequirement": "Recommended" + }, + "Links": { + "ReadRequirement": "Recommended", + "PropertyRequirements": { + "ManagerInChassis": { + "ReadRequirement": "Recommended" + } + } + }, + "ManagerType": { + "ReadRequirement": "Mandatory" + }, + "ServiceEntryPointUUID": { + "ReadRequirement": "Recommended" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Mandatory" + }, + "State": { + "ReadRequirement": "Mandatory" + } + } + }, + "UUID": { + "ReadRequirement": "Recommended" + } + }, + "ActionRequirements": { + "Reset": { + "ReadRequirement": "Recommended", + "Parameters": {} + }, + "ResetToDefaults": { + "ReadRequirement": "Recommended", + "Parameters": { + "ResetType": { + "ParameterValues": [], + "RecommendedValues": [], + "ReadRequirement": "Recommended" + } + } + } + } + }, + "ManagerCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "Memory": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "AllowedSpeedsMHz": { + "ReadRequirement": "Mandatory" + }, + "CapacityMiB": { + "ReadRequirement": "Mandatory" + }, + "ErrorCorrection": { + "ReadRequirement": "Mandatory" + }, + "MemoryDeviceType": { + "ReadRequirement": "Mandatory" + }, + "Metrics": { + "ReadRequirement": "Mandatory" + } + } + }, + "MemoryCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "MemoryMetrics": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "BandwidthPercent": { + "ReadRequirement": "Mandatory" + } + } + }, + "MetricReport": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "MetricReportDefinition": { + "ReadRequirement": "Mandatory" + }, + "MetricValues": { + "ReadRequirement": "Mandatory" + } + } + }, + "MetricReportCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "MetricReportDefinition": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "MetricReport": { + "ReadRequirement": "Mandatory" + }, + "ReportUpdates": { + "ReadRequirement": "Mandatory" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "State": { + "ReadRequirement": "Recommended" + } + } + } + } + }, + "MetricReportDefinitionCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "PCIeDevice": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "DeviceType": { + "ReadRequirement": "Mandatory" + }, + "Manufacturer": { + "ReadRequirement": "Recommended" + }, + "Model": { + "ReadRequirement": "Recommended" + 
}, + "PCIeFunctions": { + "ReadRequirement": "Recommended" + }, + "PCIeInterface": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "MaxLanes": { + "ReadRequirement": "Mandatory" + }, + "MaxPCIeType": { + "ReadRequirement": "Mandatory" + } + } + }, + "PartNumber": { + "ReadRequirement": "Recommended" + }, + "SerialNumber": { + "ReadRequirement": "Recommended" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Mandatory" + }, + "State": { + "ReadRequirement": "Mandatory" + } + } + }, + "UUID": { + "ReadRequirement": "Recommended" + } + } + }, + "PCIeDeviceCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "PCIeFunction": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "DeviceId": { + "ReadRequirement": "Mandatory" + }, + "SubsystemId": { + "ReadRequirement": "Mandatory" + }, + "SubsystemVendorId": { + "ReadRequirement": "Mandatory" + }, + "VendorId": { + "ReadRequirement": "Mandatory" + } + } + }, + "Port": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "CurrentSpeedGbps": { + "ReadRequirement": "Mandatory" + }, + "LinkState": { + "ReadRequirement": "Recommended" + }, + "LinkStatus": { + "ReadRequirement": "Recommended" + }, + "MaxSpeedGbps": { + "ReadRequirement": "Recommended" + }, + "Metrics": { + "ReadRequirement": "Mandatory" + }, + "PortProtocol": { + "ReadRequirement": "Mandatory" + }, + "PortType": { + "ReadRequirement": "Mandatory" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Mandatory" + }, + "State": { + "ReadRequirement": "Mandatory" + } + } + } + } + }, + "PortMetrics": { + "MinVersion": "1.0.0", + "ReadRequirement": "Supported", + "PropertyRequirements": { + "RXBytes": { + "ReadRequirement": "Supported" + }, + "TXBytes": { + "ReadRequirement": "Supported" + } + } + }, + "Processor": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "EnvironmentMetrics": { + "ReadRequirement": "Mandatory" + }, + "Links": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Chassis": { + "ReadRequirement": "Mandatory" + }, + "Memory": { + "ReadRequirement": "Recommended" + }, + "PCIeDevice": { + "ReadRequirement": "Recommended" + } + } + }, + "Location": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "PartLocation": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "ServiceLabel": { + "ReadRequirement": "Recommended" + } + } + } + } + }, + "Manufacturer": { + "ReadRequirement": "Mandatory" + }, + "MemorySummary": { + "ReadRequirement": "Recommended", + "PropertyRequirements": { + "Metrics": { + "ReadRequirement": "Recommended" + } + } + }, + "Metrics": { + "ReadRequirement": "Recommended" + }, + "MinSpeedMHz": { + "ReadRequirement": "Recommended" + }, + "Model": { + "ReadRequirement": "Recommended" + }, + "PartNumber": { + "ReadRequirement": "Recommended" + }, + "Ports": { + "ReadRequirement": "Mandatory" + }, + "ProcessorType": { + "ReadRequirement": "Mandatory" + }, + "SerialNumber": { + "ReadRequirement": "Recommended" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Mandatory" + }, + "State": { + "ReadRequirement": 
"Mandatory" + } + } + }, + "SystemInterface": { + "ReadRequirement": "Recommended", + "PropertyRequirements": { + "PCIe": { + "ReadRequirement": "Recommended", + "PropertyRequirements": { + "MaxLanes": { + "ReadRequirement": "Recommended" + }, + "MaxPCIeType": { + "ReadRequirement": "Recommended" + }, + "PCIeType": { + "ReadRequirement": "Recommended" + } + } + } + } + } + } + }, + "ProcessorCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "Sensor": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Reading": { + "ReadRequirement": "Mandatory" + }, + "ReadingType": { + "ReadRequirement": "Recommended" + }, + "ReadingUnits": { + "ReadRequirement": "Mandatory" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Mandatory" + }, + "State": { + "ReadRequirement": "Mandatory" + } + } + }, + "Thresholds": { + "ReadRequirement": "Supported", + "PropertyRequirements": { + "LowerCaution": { + "ReadRequirement": "Supported", + "PropertyRequirements": { + "Reading": { + "ReadRequirement": "Supported" + } + } + }, + "LowerCritical": { + "ReadRequirement": "Supported", + "PropertyRequirements": { + "Reading": { + "ReadRequirement": "Supported" + } + } + }, + "LowerFatal": { + "ReadRequirement": "Supported", + "PropertyRequirements": { + "Reading": { + "ReadRequirement": "Supported" + } + } + }, + "UpperCritical": { + "ReadRequirement": "Supported", + "PropertyRequirements": { + "Reading": { + "ReadRequirement": "Supported" + } + } + }, + "UpperFatal": { + "ReadRequirement": "Supported", + "PropertyRequirements": { + "Reading": { + "ReadRequirement": "Supported" + } + } + } + } + } + } + }, + "SensorCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "ServiceRoot": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Chassis": { + "ReadRequirement": "Mandatory" + }, + "RedfishVersion": { + "ReadRequirement": "Mandatory" + }, + "Systems": { + "ReadRequirement": "Mandatory" + }, + "TelemetryService": { + "ReadRequirement": "Mandatory" + }, + "UUID": { + "ReadRequirement": "Mandatory" + }, + "UpdateService": { + "ReadRequirement": "Mandatory" + } + } + }, + "SoftwareInventory": { + "MinVersion": "1.0.0", + "ReadRequirement": "Recommended", + "PropertyRequirements": { + "Manufacturer": { + "ReadRequirement": "Recommended" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Recommended" + }, + "State": { + "ReadRequirement": "Recommended" + } + } + }, + "Updateable": { + "ReadRequirement": "Mandatory" + }, + "Version": { + "ReadRequirement": "Mandatory" + }, + "WriteProtected": { + "ReadRequirement": "Supported" + } + } + }, + "SoftwareInventoryCollection": { + "ReadRequirement": "Mandatory", + "URIs": [], + "PropertyRequirements": { + "Members": { + "ReadRequirement": "Mandatory", + "MinCount": 1 + }, + "Members@odata.count": { + "ReadRequirement": "Mandatory" + } + } + }, + "TelemetryService": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "MaxReports": { + "ReadRequirement": "Mandatory" + }, + 
"MetricReportDefinitions": { + "ReadRequirement": "Mandatory" + }, + "MetricReports": { + "ReadRequirement": "Mandatory" + }, + "MinCollectionInterval": { + "ReadRequirement": "Mandatory" + }, + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "State": { + "ReadRequirement": "Mandatory" + } + } + } + } + }, + "ThermalMetrics": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "TemperatureReadingsCelsius": { + "ReadRequirement": "Mandatory" + } + } + }, + "ThermalSubsystem": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Status": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Health": { + "ReadRequirement": "Mandatory" + }, + "State": { + "ReadRequirement": "Mandatory" + } + } + }, + "ThermalMetrics": { + "ReadRequirement": "Mandatory" + } + } + }, + "UpdateService": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "FirmwareInventory": { + "ReadRequirement": "Mandatory" + }, + "HttpPushUri": { + "ReadRequirement": "Mandatory" + }, + "HttpPushUriOptions": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "HttpPushUriApplyTime": { + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "ApplyTime": { + "ReadRequirement": "Mandatory" + } + } + } + } + }, + "ServiceEnabled": { + "ReadRequirement": "Mandatory" + }, + "SoftwareInventory": { + "ReadRequirement": "Mandatory" + } + } + } + }, + "Registries": { + "Base": { + "MinVersion": "1.0.0", + "Repository": "redfish.dmtf.org/registries", + "Messages": { + "Success": {}, + "GeneralError": {}, + "Created": {}, + "PropertyDuplicate": {} + } + } + } +} \ No newline at end of file diff --git a/json_spec/input/spec_1.1/UpdateService.json b/json_spec/input/spec_1.1/UpdateService.json new file mode 100644 index 0000000..cd7313b --- /dev/null +++ b/json_spec/input/spec_1.1/UpdateService.json @@ -0,0 +1,61 @@ +{ + "SchemaDefinition": "RedfishInteroperabilityProfile.v1_6_0", + "ProfileName": "OCPGPUManagement", + "ProfileVersion": "1.0.0", + "Purpose": "Specifies the OCP baseline hardware management requirements for the Redfish interface on GPU platforms.", + "OwningEntity": "Open Compute Project", + "ContributedBy": "Google, Meta, Microsoft, NVidia, AMD", + "License": "Creative Commons Attribution-ShareAlike 4.0 International License", + "ContactInfo": "", + "Protocol": { + "MinVersion": "1.0" + }, + "Resources": { + "UpdateService": { + "MinVersion": "1.0.0", + "ReadRequirement": "Mandatory", + "PropertyRequirements": { + "Id": { + "ReadRequirement": "Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "Name": { + "ReadRequirement": "Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "ServiceEnabled": { + "ReadRequirement": "Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "FirmwareInventory": { + "ReadRequirement": "Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "SoftwareInventory": { + "ReadRequirement": "Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "MultipartHttpPushUri": { + "ReadRequirement": "Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + }, + "MaxImageSizeBytes": { + "ReadRequirement": "Mandatory", + "Purpose": "Placeholder for Purpose Comment to be Added" + } + } + } + }, + "Registries": { + "Base": { + "MinVersion": "1.0.0", + "Repository": "redfish.dmtf.org/registries", + 
"Messages": { + "Success": {}, + "GeneralError": {}, + "Created": {}, + "PropertyDuplicate": {} + } + } + } +} diff --git a/json_spec/input/spec_1.1/spec_bindings.json b/json_spec/input/spec_1.1/spec_bindings.json new file mode 100644 index 0000000..209013d --- /dev/null +++ b/json_spec/input/spec_1.1/spec_bindings.json @@ -0,0 +1,5 @@ +{ +"FWUpdate":"1.1", +"Interfaces":"1.4", +"RAS":"1.7" +} \ No newline at end of file diff --git a/json_spec/input/spec_1.1/workspace/.netrc b/json_spec/input/spec_1.1/workspace/.netrc new file mode 100644 index 0000000..00ed1ab --- /dev/null +++ b/json_spec/input/spec_1.1/workspace/.netrc @@ -0,0 +1,7 @@ +machine +login +password + +machine +login +password \ No newline at end of file diff --git a/json_spec/input/spec_1.1/workspace/dut_info.json b/json_spec/input/spec_1.1/workspace/dut_info.json new file mode 100644 index 0000000..ef7768a --- /dev/null +++ b/json_spec/input/spec_1.1/workspace/dut_info.json @@ -0,0 +1,113 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "Need to add one", + "title": "dut_info", + "description": "TODO: link to md doc", + "properties": { + "ConnectionIPAddress": { + "description": "IP Address of Service Entry point to be used", + "type": "string", + "value": "" + }, + "ConnectionPort": { + "description": "IP Address of Service Entry point to be used", + "type": "string", + "value": "" + }, + "SSHTunnel": { + "description": "Indicates if the user wants to communicate through SSH tunnel", + "type": "bool", + "value": false + }, + "SSHTunnelPortList":{ + "description": "List of ports to connect to via SSH tunnelling", + "type": "list", + "value": [9999, 5555] + }, + "SSHTunnelProtocol":{ + "description": "User can now give https or http", + "type": "str", + "value": "" + }, + "SSHTunnelRemotePort":{ + "description": "Remote Redfish connection port to be used while SSH tunnelling", + "type": "int", + "value": 80 + }, + "SSHTunnelRemoteIPAddress": { + "description": "IP Address of Remote Port of the SSH tunnel to be used", + "type": "string", + "value": "" + }, + "SSHTunnelUsingSSHPASS": { + "description": "Option to use sshpass for portforwarding instead of sshtunnel", + "type": "bool", + "value": false + }, + "AuthenticationRequired": { + "description": "Indicates if REST API authentication is required by the Redfish service", + "type": "bool", + "value": false + }, + "PowerOffCommand": { + "description": "Command to use to turn off the system", + "type": "string", + "value": "" + }, + "PowerOnCommand": { + "description": "Command to use to turn on the system", + "type": "string", + "value": "" + }, + "PowerOffWaitTime": { + "description": "Off delay needed (in seconds) during FW Activation", + "type": "int", + "value": 60 + }, + "PowerOnWaitTime": { + "description": "Wait time (in seconds) during FW Activation post reset cycle", + "type": "int", + "value": 300 + }, + "FwStagingTimeMax": { + "description": "Maximum time in seconds taken by staging (copy) phase of full device FW update", + "type": "int", + "value": 600 + }, + "FwActivationTimeMax": { + "description": "Maximum time in seconds taken by activation phase of full device FW update", + "type": "int", + "value": 600 + }, + "IdleWaitTimeAfterFirmwareUpdate":{ + "description": "Wait time (in seconds) for runtime execution delay", + "type": "int", + "value": 300 + }, + "SanitizeLog":{ + "description": "Sanitize log file by removing any sensitive details (i.e. 
IP and credentials)", + "type": "bool", + "value": false + }, + "SingleShotPowerCycle": { + "description": "Option to use Single Shot Power cycle", + "type": "bool", + "value": false + }, + "SingleShotPowerCycleCommand": { + "description": "Single command to power off and power on the remote system", + "type": "string", + "value": "" + } + }, + "required": [ + "ConnectionTo", + "PowerOffCommand", + "PowerOnCommand", + "PowerOffWaitTime", + "PowerOnWaitTime", + "FwStagingTimeMax", + "FwActivationTimeMax", + "IdleWaitTimeAfterFirmwareUpdate" + ] +} diff --git a/json_spec/input/spec_1.1/workspace/package_info.json b/json_spec/input/spec_1.1/workspace/package_info.json new file mode 100644 index 0000000..eaa06a1 --- /dev/null +++ b/json_spec/input/spec_1.1/workspace/package_info.json @@ -0,0 +1,75 @@ +{ + "PackageHeaderInformation": { + "PackageHeaderIdentifier": "", + "PackageHeaderFormatVersion": "1", + "PackageReleaseDateTime": "", + "PackageVersionString": "" + }, + "GPU_FW_IMAGE": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "Vendor": "", + "HasSignature": "", + "SignatureStructBytes": "" + }, + "GPU_FW_IMAGE_OLD": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "Vendor": "" + }, + "GPU_FW_IMAGE_BACKUP": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "Vendor": "" + }, + "GPU_FW_IMAGE_LARGE": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "Vendor": "" + }, + "GPU_FW_IMAGE_INVALID_SIGNED": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "Vendor": "" + }, + "GPU_FW_IMAGE_UNSIGNED_COMPONENT": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "Vendor": "" + }, + "GPU_FW_IMAGE_UNSIGNED_BUNDLE": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "Vendor": "" + }, + "GPU_FW_IMAGE_CORRUPT": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "Vendor": "" + }, + "GPU_FW_IMAGE_CORRUPT_COMPONENT": { + "Path": "workspace", + "Package": "", + "Version": "", + "JSON": "", + "CorruptComponentIdentifier": "", + "MetadataSizeBytes": 4096, + "Vendor": "" + } +} \ No newline at end of file diff --git a/json_spec/input/spec_1.1/workspace/redfish_response_messages.json b/json_spec/input/spec_1.1/workspace/redfish_response_messages.json new file mode 100644 index 0000000..4955433 --- /dev/null +++ b/json_spec/input/spec_1.1/workspace/redfish_response_messages.json @@ -0,0 +1,4 @@ +{ + "UpdateProgress_Message":"UpdateInProgress", + "LargeFWImageUpdate": "PayloadTooLarge" +} \ No newline at end of file diff --git a/json_spec/input/spec_1.1/workspace/redfish_uri_config.json b/json_spec/input/spec_1.1/workspace/redfish_uri_config.json new file mode 100644 index 0000000..02c9836 --- /dev/null +++ b/json_spec/input/spec_1.1/workspace/redfish_uri_config.json @@ -0,0 +1,30 @@ +{ + "GPU":{ + "GPUMC": "/{GPUMC}", + "BaseURI":"/{GPUMC}/redfish/v1/", + "GPUCheckURI":"/Managers/{ManagerID}", + "BaseboardIDs":"['BaseboardIDs']", + "TaskServiceURI": "/TaskService/Tasks/" + }, + "GPU_FWUpdate":{ + "MultiPartFormData":true, + "HttpPushUriTargets":"HttpPushUriTargets", + "specific_targets":"[]", + "exclude_targets_list": [], + "MultiPartPushUriSupport":false, + "IsMultiPart":false, + "UpdateURI":"/UpdateService" + }, + "GPU_Telemetry":{ + }, + "GPU_RAS":{ + }, + "BMC":{ + "BaseURI":"/redfish/v1", + "SystemURI":"", + "BMCFWInventory":"" + } +} + + + diff --git a/json_spec/input/spec_1.1/workspace/test_runner.json 
new file mode 100644
index 0000000..20efadf
--- /dev/null
+++ b/json_spec/input/spec_1.1/workspace/test_runner.json
@@ -0,0 +1,30 @@
+{
+  "$schema": "https://json-schema.org/draft/2020-12/schema",
+  "$id": "Need to add one",
+  "title": "Test Runner Configuration",
+  "description": "Configure the TestRunner obj, list of test cases overrides a test suite",
+  "spec_version": "1.1",
+  "output_override_directory": "",
+  "debug_mode": true,
+  "console_log": true,
+  "progress_bar": false,
+  "include_tags": [],
+  "exclude_tags": [],
+  "test_sequence": [],
+  "group_sequence": [],
+  "internal_testing": false,
+  "weighted_score": {
+    "L0": 100, "L1": 50, "L2": 20, "L3": 0
+  },
+  "normalized_score": {
+    "L0": 50, "L1": 35, "L2": 15, "L3": 0
+  },
+  "active_test_suite": [],
+  "dev_test_suite": [],
+  "full_compliance_test_suite": ["F0", "PROF", "F1", "PROF", "T97", "H5", "F8", "PROF", "F16", "PROF", "F88", "PROF", "F63", "F5", "F64", "F2", "F18", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "H83", "H81", "H8", "H6", "H4", "T0", "T4"],
+  "regression_test_suite_0": ["F0", "F1", "F63", "F64", "F8", "F88", "T0"],
+  "regression_test_suite_1": ["F0", "F1", "F23", "F26", "F28", "F27", "F22", "F25", "F18", "F32"],
+  "regression_test_suite_2": ["F0", "F1", "F16", "F19", "F55", "F62", "F89"],
+  "regression_test_suite_3": ["F0", "F1", "F8", "F16", "F18", "F22", "F23", "PROF", "F24", "F25", "F26", "F27", "F28", "F32", "F55", "F63", "F64", "F88", "R1", "R2", "R3", "T0", "T2", "T4"],
+  "test_uri_response_excel": "Excel file name for checking the response"
+}
\ No newline at end of file
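
For illustration only: the new `spec_1.1` inputs are plain JSON, so the relationship between `test_runner.json` (`"spec_version": "1.1"`), the `json_spec/input/spec_1.1` folder, and `spec_bindings.json` can be exercised with the Python standard library alone. The sketch below is a hedged example, not CTAM code: `resolve_spec_inputs` is a hypothetical helper name, and it assumes only the file layout introduced in this patch, with `spec_bindings.json` mapping test domains (FWUpdate, Interfaces, RAS) to spec revisions.

```python
import json
from pathlib import Path

def resolve_spec_inputs(workspace: Path, json_spec_root: Path) -> dict:
    """Hypothetical helper: locate the spec_<version> input folder named in test_runner.json."""
    runner_cfg = json.loads((workspace / "test_runner.json").read_text())
    spec_version = runner_cfg.get("spec_version", "1.1")   # e.g. "1.1"
    spec_dir = json_spec_root / f"spec_{spec_version}"     # e.g. json_spec/input/spec_1.1
    bindings = json.loads((spec_dir / "spec_bindings.json").read_text())
    return {"spec_dir": spec_dir, "bindings": bindings}

if __name__ == "__main__":
    # Assumed layout from this patch: json_spec/input/spec_1.1/workspace/test_runner.json
    info = resolve_spec_inputs(
        Path("json_spec/input/spec_1.1/workspace"),
        Path("json_spec/input"),
    )
    print(info["bindings"])   # e.g. {"FWUpdate": "1.1", "Interfaces": "1.4", "RAS": "1.7"}
```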