diff --git a/test/functional/api/cas/cli_messages.py b/test/functional/api/cas/cli_messages.py index b145c1cc5..83de9c33d 100644 --- a/test/functional/api/cas/cli_messages.py +++ b/test/functional/api/cas/cli_messages.py @@ -252,23 +252,30 @@ r"Option '--cache-line-size \(-x\)' is not allowed" ] -def check_stderr_msg(output: Output, expected_messages, negate=False): - return __check_string_msg(output.stderr, expected_messages, negate) - - -def check_stdout_msg(output: Output, expected_messages, negate=False): - return __check_string_msg(output.stdout, expected_messages, negate) - - -def __check_string_msg(text: str, expected_messages, negate=False): - msg_ok = True - for msg in expected_messages: - matches = re.search(msg, text) - if not matches and not negate: - TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {text}.") - msg_ok = False - elif matches and negate: - TestRun.LOGGER.error(f"Message is incorrect, expected to not find: {msg}\n " - f"actual: {text}.") - msg_ok = False - return msg_ok +mdadm_partition_not_suitable_for_array = [ + r"\S+ is not suitable for this array" +] + +mdadm_device_or_resource_busy = [ + r"mdadm: cannot open \S+: Device or resource busy" +] + + +def check_string_msg_all(text: str, expected_messages): + if all([re.search(msg, text) for msg in expected_messages]): + return True + TestRun.LOGGER.error( + f"Message is incorrect, expected all of messages: {expected_messages}\n " + f'Got: "{text}" ' + f"At least one expected message not found" + ) + return False + + +def check_string_msg_any(text: str, expected_messages): + if any([re.search(m, text) for m in expected_messages]): + return True + TestRun.LOGGER.error( + f"Message is incorrect, expected one of: {expected_messages}\n " f'Got: "{text}"' + ) + return False diff --git a/test/functional/tests/cache_ops/test_multilevel_cache.py b/test/functional/tests/cache_ops/test_multilevel_cache.py index 183f1afda..83cc68d90 100644 --- 
a/test/functional/tests/cache_ops/test_multilevel_cache.py +++ b/test/functional/tests/cache_ops/test_multilevel_cache.py @@ -49,7 +49,7 @@ def test_remove_multilevel_core(): output = TestRun.executor.run_expect_fail(cli.remove_core_cmd(cache_id=str(cache1.cache_id), core_id=str(core1.core_id), force=True)) - cli_messages.check_stderr_msg(output, cli_messages.remove_multilevel_core) + cli_messages.check_string_msg_all(output.stderr, cli_messages.remove_multilevel_core) with TestRun.step("Stop cache."): casadm.stop_all_caches() diff --git a/test/functional/tests/ci/test_basic.py b/test/functional/tests/ci/test_basic.py index a41724403..9cc36a16c 100644 --- a/test/functional/tests/ci/test_basic.py +++ b/test/functional/tests/ci/test_basic.py @@ -9,7 +9,7 @@ from core.test_run import TestRun from api.cas import cli, casadm from api.cas.cli_messages import ( - check_stderr_msg, + check_string_msg_all, start_cache_on_already_used_dev, start_cache_with_existing_id ) @@ -72,14 +72,14 @@ def test_negative_start_cache(): output = TestRun.executor.run_expect_fail( cli.start_cmd(cache_dev_1.path, cache_id="2", force=True) ) - if not check_stderr_msg(output, start_cache_on_already_used_dev): + if not check_string_msg_all(output.stderr, start_cache_on_already_used_dev): TestRun.fail(f"Received unexpected error message: {output.stderr}") with TestRun.step("Start cache with the same ID on another cache device"): output = TestRun.executor.run_expect_fail( cli.start_cmd(cache_dev_2.path, cache_id="1", force=True) ) - if not check_stderr_msg(output, start_cache_with_existing_id): + if not check_string_msg_all(output.stderr, start_cache_with_existing_id): TestRun.fail(f"Received unexpected error message: {output.stderr}") diff --git a/test/functional/tests/ci/test_recovery.py b/test/functional/tests/ci/test_recovery.py index 1c4d06366..1775e9c9f 100644 --- a/test/functional/tests/ci/test_recovery.py +++ b/test/functional/tests/ci/test_recovery.py @@ -8,7 +8,7 @@ from api.cas import 
casadm from api.cas.cache_config import CacheMode from api.cas.cli import casadm_bin -from api.cas.cli_messages import check_stderr_msg, stop_cache_errors +from api.cas.cli_messages import check_string_msg_all, stop_cache_errors from core.test_run import TestRun from storage_devices.disk import DiskTypeLowerThan, DiskTypeSet, DiskType, Disk from test_tools.dd import Dd @@ -204,7 +204,7 @@ def dirty_stop(cache_disk, caches: list): for cache in caches: cmd = f"{casadm_bin} --stop-cache --cache-id {cache.cache_id} --no-data-flush" output = TestRun.executor.run(cmd) - if not check_stderr_msg(output, stop_cache_errors): + if not check_string_msg_all(output.stderr, stop_cache_errors): TestRun.fail(f"Cache {cache.cache_id} stopping should fail.") with TestRun.step("Turn on devices."): diff --git a/test/functional/tests/cli/test_cli_help_and_version.py b/test/functional/tests/cli/test_cli_help_and_version.py index 231597308..c53a9d245 100644 --- a/test/functional/tests/cli/test_cli_help_and_version.py +++ b/test/functional/tests/cli/test_cli_help_and_version.py @@ -10,7 +10,7 @@ from api.cas import casadm from api.cas.casadm_params import OutputFormat from api.cas.cli_help_messages import * -from api.cas.cli_messages import check_stderr_msg, check_stdout_msg +from api.cas.cli_messages import check_string_msg_all from core.test_run import TestRun @@ -24,80 +24,80 @@ def test_cli_help(shortcut): """ TestRun.LOGGER.info("Run 'help' for every 'casadm' command.") output = casadm.help(shortcut) - check_stdout_msg(output, casadm_help) + check_string_msg_all(output.stdout, casadm_help) output = TestRun.executor.run("casadm" + (" -S" if shortcut else " --start-cache") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, start_cache_help) + check_string_msg_all(output.stdout, start_cache_help) output = TestRun.executor.run("casadm" + (" -T" if shortcut else " --stop-cache") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, stop_cache_help) + 
check_string_msg_all(output.stdout, stop_cache_help) output = TestRun.executor.run("casadm" + (" -X" if shortcut else " --set-param") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, set_params_help) + check_string_msg_all(output.stdout, set_params_help) output = TestRun.executor.run("casadm" + (" -G" if shortcut else " --get-param") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, get_params_help) + check_string_msg_all(output.stdout, get_params_help) output = TestRun.executor.run("casadm" + (" -Q" if shortcut else " --set-cache-mode") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, set_cache_mode_help) + check_string_msg_all(output.stdout, set_cache_mode_help) output = TestRun.executor.run("casadm" + (" -A" if shortcut else " --add-core") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, add_core_help) + check_string_msg_all(output.stdout, add_core_help) output = TestRun.executor.run("casadm" + (" -R" if shortcut else " --remove-core") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, remove_core_help) + check_string_msg_all(output.stdout, remove_core_help) output = TestRun.executor.run("casadm" + " --remove-detached" + (" -H" if shortcut else " --help")) - check_stdout_msg(output, remove_detached_help) + check_string_msg_all(output.stdout, remove_detached_help) output = TestRun.executor.run("casadm" + (" -L" if shortcut else " --list-caches") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, list_help) + check_string_msg_all(output.stdout, list_help) output = TestRun.executor.run("casadm" + (" -P" if shortcut else " --stats") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, stats_help) + check_string_msg_all(output.stdout, stats_help) output = TestRun.executor.run("casadm" + (" -Z" if shortcut else " --reset-counters") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, reset_counters_help) + check_string_msg_all(output.stdout, 
reset_counters_help) output = TestRun.executor.run("casadm" + (" -F" if shortcut else " --flush-cache") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, flush_cache_help) + check_string_msg_all(output.stdout, flush_cache_help) output = TestRun.executor.run("casadm" + (" -C" if shortcut else " --io-class") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, ioclass_help) + check_string_msg_all(output.stdout, ioclass_help) output = TestRun.executor.run("casadm" + (" -V" if shortcut else " --version") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, version_help) + check_string_msg_all(output.stdout, version_help) output = TestRun.executor.run("casadm" + (" -H" if shortcut else " --help") + (" -H" if shortcut else " --help")) - check_stdout_msg(output, help_help) + check_string_msg_all(output.stdout, help_help) output = TestRun.executor.run("casadm" + " --standby" + (" -H" if shortcut else " --help")) - check_stdout_msg(output, standby_help) + check_string_msg_all(output.stdout, standby_help) output = TestRun.executor.run("casadm" + " --zero-metadata" + (" -H" if shortcut else " --help")) - check_stdout_msg(output, zero_metadata_help) + check_string_msg_all(output.stdout, zero_metadata_help) output = TestRun.executor.run("casadm" + (" -Y" if shortcut else " --yell") + (" -H" if shortcut else " --help")) - check_stderr_msg(output, unrecognized_stderr) - check_stdout_msg(output, unrecognized_stdout) + check_string_msg_all(output.stderr, unrecognized_stderr) + check_string_msg_all(output.stdout, unrecognized_stdout) @pytest.mark.parametrize("output_format", OutputFormat) diff --git a/test/functional/tests/cli/test_cli_standby.py b/test/functional/tests/cli/test_cli_standby.py index 71dc6ffcb..7f6e30ea2 100644 --- a/test/functional/tests/cli/test_cli_standby.py +++ b/test/functional/tests/cli/test_cli_standby.py @@ -18,7 +18,7 @@ from test_utils.output import CmdException from test_utils.size import Size, Unit from 
api.cas.cli_messages import ( - check_stderr_msg, + check_string_msg_all, missing_param, disallowed_param, operation_forbiden_in_standby, @@ -72,7 +72,7 @@ def test_standby_neg_cli_params(): TestRun.LOGGER.error( f'"{tested_cmd}" command succeeded despite missing required "{name}" parameter!' ) - if not check_stderr_msg(output, missing_param) or name not in output.stderr: + if not check_string_msg_all(output.stderr, missing_param) or name not in output.stderr: TestRun.LOGGER.error( f'Expected error message in format "{missing_param[0]}" with "{name}" ' f'(the missing param). Got "{output.stderr}" instead.' @@ -98,7 +98,7 @@ def test_standby_neg_cli_params(): TestRun.LOGGER.error( f'"{tested_cmd}" command succeeded despite disallowed "{name}" parameter!' ) - if not check_stderr_msg(output, disallowed_param): + if not check_string_msg_all(output.stderr, disallowed_param): TestRun.LOGGER.error( f'Expected error message in format "{disallowed_param[0]}" ' f'Got "{output.stderr}" instead.' @@ -153,7 +153,7 @@ def test_activate_neg_cli_params(): f'"{tested_cmd}" command succeeded despite missing obligatory' f' "{name}" parameter!' ) - if not check_stderr_msg(output, missing_param) or name not in output.stderr: + if not check_string_msg_all(output.stderr, missing_param) or name not in output.stderr: TestRun.LOGGER.error( f'Expected error message in format "{missing_param[0]}" with "{name}" ' f'(the missing param). Got "{output.stderr}" instead.' @@ -178,7 +178,7 @@ def test_activate_neg_cli_params(): TestRun.LOGGER.error( f'"{tested_cmd}" command succeeded despite disallowed "{name}" parameter!' ) - if not check_stderr_msg(output, expected_error_message): + if not check_string_msg_all(output.stderr, expected_error_message): TestRun.LOGGER.error( f'Expected error message in format "{expected_error_message[0]}" ' f'Got "{output.stderr}" instead.' 
@@ -250,7 +250,7 @@ def test_standby_neg_cli_management(): TestRun.LOGGER.info(f"Verify {cmd}") output = TestRun.executor.run_expect_fail(cmd) - if not check_stderr_msg(output, operation_forbiden_in_standby): + if not check_string_msg_all(output.stderr, operation_forbiden_in_standby): TestRun.LOGGER.error( f'Expected the following error message "{operation_forbiden_in_standby[0]}" ' f'Got "{output.stderr}" instead.' @@ -289,7 +289,7 @@ def test_start_neg_cli_flags(): mutually_exclusive_cmd_init = f"{casadm_bin} --standby --init --load" \ f" {init_required_params}" output = TestRun.executor.run_expect_fail(mutually_exclusive_cmd_init) - if not check_stderr_msg(output, mutually_exclusive_params_init): + if not check_string_msg_all(output.stderr, mutually_exclusive_params_init): TestRun.LOGGER.error( f'Expected error message in format ' f'"{mutually_exclusive_params_init[0]}"' @@ -307,7 +307,7 @@ def test_start_neg_cli_flags(): for cmd in mutually_exclusive_cmd_load: output = TestRun.executor.run_expect_fail(cmd) - if not check_stderr_msg(output, mutually_exclusive_params_load): + if not check_string_msg_all(output.stderr, mutually_exclusive_params_load): TestRun.LOGGER.error( f'Expected error message in format ' f'"{mutually_exclusive_params_load[0]}"' @@ -353,7 +353,7 @@ def test_activate_without_detach(): cmd = f"{casadm_bin} --standby --activate --cache-id {cache_id} --cache-device " \ f"{cache_dev.path}" output = TestRun.executor.run(cmd) - if not check_stderr_msg(output, activate_without_detach): + if not check_string_msg_all(output.stderr, activate_without_detach): TestRun.LOGGER.error( f'Expected error message in format ' f'"{activate_without_detach[0]}"' @@ -452,7 +452,7 @@ def test_activate_neg_cache_line_size(): with TestRun.step("Try to activate cache instance"): with pytest.raises(CmdException) as cmdExc: output = standby_cache.standby_activate(standby_cache_dev) - if not check_stderr_msg(output, cache_line_size_mismatch): + if not 
check_string_msg_all(output.stderr, cache_line_size_mismatch): TestRun.LOGGER.error( f'Expected error message in format ' f'"{cache_line_size_mismatch[0]}"' @@ -507,7 +507,7 @@ def test_standby_init_with_preexisting_metadata(): cache_line_size=str(int(cls.value.value / Unit.KibiByte.value)), ) ) - if not check_stderr_msg(output, start_cache_with_existing_metadata): + if not check_string_msg_all(output.stderr, start_cache_with_existing_metadata): TestRun.LOGGER.error( f"Invalid error message. Expected {start_cache_with_existing_metadata}." f"Got {output.stderr}" @@ -558,7 +558,7 @@ def test_standby_init_with_preexisting_filesystem(filesystem): cache_line_size=str(int(cls.value.value / Unit.KibiByte.value)), ) ) - if not check_stderr_msg(output, standby_init_with_existing_filesystem): + if not check_string_msg_all(output.stderr, standby_init_with_existing_filesystem): TestRun.LOGGER.error( f"Invalid error message. Expected {standby_init_with_existing_filesystem}." f"Got {output.stderr}" diff --git a/test/functional/tests/cli/test_cli_start_stop.py b/test/functional/tests/cli/test_cli_start_stop.py index 7f1b503d9..d5720c3f2 100644 --- a/test/functional/tests/cli/test_cli_start_stop.py +++ b/test/functional/tests/cli/test_cli_start_stop.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # @@ -54,7 +54,7 @@ def test_cli_start_stop_default_id(shortcut): TestRun.fail(f"There is a wrong number of caches found in the OS: {len(caches)}." 
f"\nNo cache should be present after stopping the cache.") output = casadm.list_caches(shortcut=shortcut) - cli_messages.check_stdout_msg(output, cli_messages.no_caches_running) + cli_messages.check_string_msg_all(output.stdout, cli_messages.no_caches_running) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane])) @@ -97,7 +97,7 @@ def test_cli_start_stop_custom_id(shortcut): TestRun.fail(f"There is a wrong number of caches found in the OS: {len(caches)}." f"\nNo cache should be present after stopping the cache.") output = casadm.list_caches(shortcut=shortcut) - cli_messages.check_stdout_msg(output, cli_messages.no_caches_running) + cli_messages.check_string_msg_all(output.stdout, cli_messages.no_caches_running) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane])) @@ -148,7 +148,7 @@ def test_cli_add_remove_default_id(shortcut): if len(caches) != 0: TestRun.fail("No cache should be present after stopping the cache.") output = casadm.list_caches(shortcut=shortcut) - cli_messages.check_stdout_msg(output, cli_messages.no_caches_running) + cli_messages.check_string_msg_all(output.stdout, cli_messages.no_caches_running) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane])) @@ -201,7 +201,7 @@ def test_cli_add_remove_custom_id(shortcut): if len(caches) != 0: TestRun.fail("No cache should be present after stopping the cache.") output = casadm.list_caches(shortcut=shortcut) - cli_messages.check_stdout_msg(output, cli_messages.no_caches_running) + cli_messages.check_string_msg_all(output.stdout, cli_messages.no_caches_running) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @@ -231,4 +231,4 @@ def test_cli_load_and_force(shortcut): ) if output.exit_code == 0: TestRun.fail("Loading cache with 'force' option should fail.") - cli_messages.check_stderr_msg(output, cli_messages.load_and_force) + cli_messages.check_string_msg_all(output.stderr, 
cli_messages.load_and_force) diff --git a/test/functional/tests/cli/test_zero_metadata_command.py b/test/functional/tests/cli/test_zero_metadata_command.py index f3fbf04d1..45fc0e5c3 100644 --- a/test/functional/tests/cli/test_zero_metadata_command.py +++ b/test/functional/tests/cli/test_zero_metadata_command.py @@ -1,7 +1,8 @@ # -# Copyright(c) 2021 Intel Corporation +# Copyright(c) 2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # + import time from datetime import timedelta @@ -23,14 +24,14 @@ @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_zero_metadata_negative_cases(): """ - title: Test for '--zero-metadata' negative cases. - description: | - Test for '--zero-metadata' scenarios with expected failures. - pass_criteria: - - Zeroing metadata without '--force' failed when run on cache. - - Zeroing metadata with '--force' failed when run on cache. - - Zeroing metadata failed when run on system drive. - - Load cache command failed after successfully zeroing metadata on the cache device. + title: Test for '--zero-metadata' negative cases. + description: | + Test for '--zero-metadata' scenarios with expected failures. + pass_criteria: + - Zeroing metadata without '--force' failed when run on cache. + - Zeroing metadata with '--force' failed when run on cache. + - Zeroing metadata failed when run on system drive. + - Load cache command failed after successfully zeroing metadata on the cache device. 
""" with TestRun.step("Prepare cache and core devices."): cache_dev, core_dev, cache_disk = prepare_devices() @@ -43,21 +44,21 @@ def test_zero_metadata_negative_cases(): casadm.zero_metadata(cache_dev) TestRun.LOGGER.error("Zeroing metadata should fail!") except CmdException as e: - cli_messages.check_stderr_msg(e.output, cli_messages.unavailable_device) + cli_messages.check_string_msg_all(e.output.stderr, cli_messages.unavailable_device) with TestRun.step("Try to zero metadata with '--force' option and validate error message."): try: casadm.zero_metadata(cache_dev, force=True) TestRun.LOGGER.error("Zeroing metadata with '--force' option should fail!") except CmdException as e: - cli_messages.check_stderr_msg(e.output, cli_messages.unavailable_device) + cli_messages.check_string_msg_all(e.output.stderr, cli_messages.unavailable_device) with TestRun.step("Try to zeroing metadata on system disk."): os_disks = get_system_disks() for os_disk in os_disks: output = TestRun.executor.run(cli.zero_metadata_cmd(str(os_disk))) if output.exit_code != 0: - cli_messages.check_stderr_msg(output, cli_messages.error_handling) + cli_messages.check_string_msg_all(output.stderr.stderr, cli_messages.error_handling) else: TestRun.LOGGER.error("Zeroing metadata should fail!") @@ -69,8 +70,10 @@ def test_zero_metadata_negative_cases(): casadm.zero_metadata(cache_dev) TestRun.LOGGER.info("Zeroing metadata successful!") except CmdException as e: - TestRun.LOGGER.error(f"Zeroing metadata should work for cache device after stopping " - f"cache! Error message: {e.output}") + TestRun.LOGGER.error( + f"Zeroing metadata should work for cache device after stopping " + f"cache! Error message: {e.output}" + ) with TestRun.step("Load cache."): try: @@ -85,12 +88,12 @@ def test_zero_metadata_negative_cases(): @pytest.mark.parametrizex("filesystem", Filesystem) def test_zero_metadata_filesystem(filesystem): """ - title: Test for '--zero-metadata' and filesystem. 
- description: | - Test for '--zero-metadata' on drive with filesystem. - pass_criteria: - - Zeroing metadata on device with filesystem failed and not removed filesystem. - - Zeroing metadata on mounted device failed. + title: Test for '--zero-metadata' and filesystem. + description: | + Test for '--zero-metadata' on drive with filesystem. + pass_criteria: + - Zeroing metadata on device with filesystem failed and not removed filesystem. + - Zeroing metadata on mounted device failed. """ mount_point = "/mnt" with TestRun.step("Prepare devices."): @@ -108,7 +111,7 @@ def test_zero_metadata_filesystem(filesystem): casadm.zero_metadata(core) TestRun.LOGGER.error("Zeroing metadata should fail!") except CmdException as e: - cli_messages.check_stderr_msg(e.output, cli_messages.no_cas_metadata) + cli_messages.check_string_msg_all(e.output.stderr, cli_messages.no_cas_metadata) file_system = get_device_filesystem_type(core.get_device_id()) @@ -123,21 +126,21 @@ def test_zero_metadata_filesystem(filesystem): casadm.zero_metadata(core) TestRun.LOGGER.error("Zeroing metadata should fail!") except CmdException as e: - cli_messages.check_stderr_msg(e.output, cli_messages.unavailable_device) + cli_messages.check_string_msg_all(e.output.stderr, cli_messages.unavailable_device) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_zero_metadata_dirty_data(): """ - title: Test for '--zero-metadata' and dirty data scenario. - description: | - Test for '--zero-metadata' with and without 'force' option if there are dirty data - on cache. - pass_criteria: - - Zeroing metadata without force failed on cache with dirty data. - - Zeroing metadata with force ran successfully on cache with dirty data. - - Cache started successfully after zeroing metadata on cache with dirty data. + title: Test for '--zero-metadata' and dirty data scenario. 
+ description: | + Test for '--zero-metadata' with and without 'force' option if there are dirty data + on cache. + pass_criteria: + - Zeroing metadata without force failed on cache with dirty data. + - Zeroing metadata with force ran successfully on cache with dirty data. + - Cache started successfully after zeroing metadata on cache with dirty data. """ with TestRun.step("Prepare cache and core devices."): cache_dev, core_disk, cache_disk = prepare_devices() @@ -173,15 +176,17 @@ def test_zero_metadata_dirty_data(): casadm.zero_metadata(cache_dev) TestRun.LOGGER.error("Zeroing metadata without force should fail!") except CmdException as e: - cli_messages.check_stderr_msg(e.output, cli_messages.cache_dirty_data) + cli_messages.check_string_msg_all(e.output.stderr, cli_messages.cache_dirty_data) with TestRun.step("Zeroing metadata on cache device with force"): try: casadm.zero_metadata(cache_dev, force=True) TestRun.LOGGER.info("Zeroing metadata with force successful!") except CmdException as e: - TestRun.LOGGER.error(f"Zeroing metadata with force should work for cache device!" - f"Error message: {e.output}") + TestRun.LOGGER.error( + f"Zeroing metadata with force should work for cache device!" + f"Error message: {e.output}" + ) with TestRun.step("Start cache without 'force' option."): try: @@ -195,14 +200,14 @@ def test_zero_metadata_dirty_data(): @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) def test_zero_metadata_dirty_shutdown(): """ - title: Test for '--zero-metadata' and dirty shutdown scenario. - description: | - Test for '--zero-metadata' with and without 'force' option on cache which had been dirty - shut down before. - pass_criteria: - - Zeroing metadata without force failed on cache after dirty shutdown. - - Zeroing metadata with force ran successfully on cache after dirty shutdown. - - Cache started successfully after dirty shutdown and zeroing metadata on cache. + title: Test for '--zero-metadata' and dirty shutdown scenario. 
+ description: | + Test for '--zero-metadata' with and without 'force' option on cache which had been dirty + shut down before. + pass_criteria: + - Zeroing metadata without force failed on cache after dirty shutdown. + - Zeroing metadata with force ran successfully on cache after dirty shutdown. + - Cache started successfully after dirty shutdown and zeroing metadata on cache. """ with TestRun.step("Prepare cache and core devices."): cache_dev, core_disk, cache_disk = prepare_devices() @@ -236,15 +241,17 @@ def test_zero_metadata_dirty_shutdown(): casadm.zero_metadata(cache_dev) TestRun.LOGGER.error("Zeroing metadata without force should fail!") except CmdException as e: - cli_messages.check_stderr_msg(e.output, cli_messages.cache_dirty_shutdown) + cli_messages.check_string_msg_all(e.output.stderr, cli_messages.cache_dirty_shutdown) with TestRun.step("Zeroing metadata on cache device with force"): try: casadm.zero_metadata(cache_dev, force=True) TestRun.LOGGER.info("Zeroing metadata with force successful!") except CmdException as e: - TestRun.LOGGER.error(f"Zeroing metadata with force should work for cache device!" - f"Error message: {e.output}") + TestRun.LOGGER.error( + f"Zeroing metadata with force should work for cache device!" 
+ f"Error message: {e.output}" + ) with TestRun.step("Start cache."): try: @@ -255,10 +262,10 @@ def test_zero_metadata_dirty_shutdown(): def prepare_devices(): - cache_disk = TestRun.disks['cache'] + cache_disk = TestRun.disks["cache"] cache_disk.create_partitions([Size(100, Unit.MebiByte)]) cache_part = cache_disk.partitions[0] - core_disk = TestRun.disks['core'] + core_disk = TestRun.disks["core"] core_disk.create_partitions([Size(5, Unit.GibiByte)]) return cache_part, core_disk, cache_disk diff --git a/test/functional/tests/data_integrity/test_data_integrity_unplug.py b/test/functional/tests/data_integrity/test_data_integrity_unplug.py index 74d7f398c..f73644885 100644 --- a/test/functional/tests/data_integrity/test_data_integrity_unplug.py +++ b/test/functional/tests/data_integrity/test_data_integrity_unplug.py @@ -126,7 +126,7 @@ async def test_data_integrity_unplug(cache_mode): try: cache.stop(no_data_flush=True) except CmdException as e: - if not cli_messages.check_stderr_msg(e.output, cli_messages.stop_cache_errors): + if not cli_messages.check_string_msg_all(e.output.stderr, cli_messages.stop_cache_errors): raise with TestRun.step("Plug back the cache device"): diff --git a/test/functional/tests/fault_injection/test_fault_injection_core_in_raid.py b/test/functional/tests/fault_injection/test_fault_injection_core_in_raid.py new file mode 100644 index 000000000..ad012f8e1 --- /dev/null +++ b/test/functional/tests/fault_injection/test_fault_injection_core_in_raid.py @@ -0,0 +1,65 @@ +# +# Copyright(c) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# + +import pytest +from api.cas import casadm +from api.cas.cache_config import CacheMode +from core.test_run import TestRun +from storage_devices.disk import DiskType, DiskTypeSet +from storage_devices.raid import Raid, RaidConfiguration, MetadataVariant, Level +from test_utils.size import Size, Unit +from api.cas.cli_messages import ( + mdadm_partition_not_suitable_for_array, + 
mdadm_device_or_resource_busy, + check_string_msg_any, +) + +expected_msg_1 = mdadm_partition_not_suitable_for_array +expected_msg_2 = mdadm_device_or_resource_busy + + +@pytest.mark.parametrizex("cache_mode", [CacheMode.WB, CacheMode.WT]) +@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) +@pytest.mark.require_disk("core", DiskTypeSet([DiskType.sata, DiskType.hdd])) +@pytest.mark.require_disk("core2", DiskTypeSet([DiskType.sata, DiskType.hdd])) +def test_fault_injection_core_in_raid(cache_mode): + """ + title: Try to create raid on device used as a core device + description: Verify that it is impossible to use an underlying core disk as raid member + pass_criteria: + - Expected to reject RAID creation with proper warning. + """ + with TestRun.step("Prepare CAS device."): + cache_disk = TestRun.disks["cache"] + first_core_disk = TestRun.disks["core"] + second_core_disk = TestRun.disks["core2"] + cache_disk.create_partitions([Size(2, Unit.GibiByte)]) + first_core_disk.create_partitions([Size(2, Unit.GibiByte)]) + second_core_disk.create_partitions([Size(2, Unit.GibiByte)]) + cache_dev = cache_disk.partitions[0] + first_core_dev = first_core_disk.partitions[0] + second_core_dev = second_core_disk.partitions[0] + + with TestRun.step("Start cas and add core."): + cache = casadm.start_cache(cache_dev, cache_mode, force=True) + casadm.add_core(cache, first_core_dev) + + with TestRun.step("Attempt to use core device to build SW RAID."): + config = RaidConfiguration( + level=Level.Raid1, metadata=MetadataVariant.Legacy, number_of_devices=2 + ) + + try: + Raid.create(config, [first_core_dev, second_core_dev]) + TestRun.fail(f"RAID created successfully. Expected otherwise.") + except Exception as ex: + output = ex.output.stderr + + with TestRun.step("Looking for any of 2 expected messages."): + + if check_string_msg_any(output, expected_msg_1 + expected_msg_2): + TestRun.LOGGER.info("RAID not created. 
Found expected warning in exception message.") + else: + TestRun.LOGGER.error(f"RAID not created but warning message not as expected.\n") \ No newline at end of file diff --git a/test/functional/tests/fault_injection/test_fault_injection_opencas_load.py b/test/functional/tests/fault_injection/test_fault_injection_opencas_load.py index 273c5a4ac..3c3873e06 100644 --- a/test/functional/tests/fault_injection/test_fault_injection_opencas_load.py +++ b/test/functional/tests/fault_injection/test_fault_injection_opencas_load.py @@ -69,7 +69,7 @@ def test_stop_no_flush_load_cache(cache_mode): output = TestRun.executor.run_expect_fail(cli.start_cmd( cache_dev=str(cache_part.path), cache_mode=str(cache_mode.name.lower()), force=False, load=False)) - cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata) + cli_messages.check_string_msg_all(output.stderr, cli_messages.start_cache_with_existing_metadata) with TestRun.step("Load cache."): cache = casadm.load_cache(cache.cache_device) diff --git a/test/functional/tests/fault_injection/test_fault_injection_standby.py b/test/functional/tests/fault_injection/test_fault_injection_standby.py index 53bf942d3..310906e15 100644 --- a/test/functional/tests/fault_injection/test_fault_injection_standby.py +++ b/test/functional/tests/fault_injection/test_fault_injection_standby.py @@ -13,7 +13,7 @@ from core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan from test_utils.size import Size, Unit -from api.cas.cli_messages import check_stderr_msg, missing_param, disallowed_param +from api.cas.cli_messages import check_string_msg_all, missing_param, disallowed_param from api.cas.cache_config import CacheLineSize, CacheMode from api.cas.cli import standby_activate_cmd, standby_load_cmd from api.cas.dmesg import get_md_section_size diff --git a/test/functional/tests/fault_injection/test_fault_injection_standby_core.py 
b/test/functional/tests/fault_injection/test_fault_injection_standby_core.py index 514d0752c..8f6f5246b 100644 --- a/test/functional/tests/fault_injection/test_fault_injection_standby_core.py +++ b/test/functional/tests/fault_injection/test_fault_injection_standby_core.py @@ -16,8 +16,7 @@ from test_tools.dd import Dd from api.cas.cli import standby_activate_cmd from api.cas.cli_messages import ( - check_stderr_msg, - check_stdout_msg, + check_string_msg_all, activate_with_different_cache_id, load_inactive_core_missing, cache_activated_successfully, @@ -83,7 +82,7 @@ def test_activate_neg_cache_id(): output = TestRun.executor.run_expect_fail( standby_activate_cmd(cache_dev=standby_dev.path, cache_id=str(standby_cache_id)) ) - if not check_stderr_msg(output, activate_with_different_cache_id): + if not check_string_msg_all(output.stderr, activate_with_different_cache_id): TestRun.LOGGER.error( f"Invalid error message. Expected {activate_with_different_cache_id}." f"Got {output.stderr}" @@ -147,8 +146,8 @@ def test_activate_incomplete_cache(): with TestRun.step("Activate standby cache and check if a proper incompleteness info appeared"): output = cache.standby_activate(device=cache_dev) - check_stderr_msg(output, load_inactive_core_missing) - check_stdout_msg(output, cache_activated_successfully) + check_string_msg_all(output.stderr, load_inactive_core_missing) + check_string_msg_all(output.stdout, cache_activated_successfully) with TestRun.step("Verify that the cache is in Incomplete state"): status = cache.get_status() @@ -263,8 +262,8 @@ def test_activate_neg_core_size(): activate_cmd = standby_activate_cmd(cache_dev=cache_dev.path, cache_id="1") for i in range(10): output = TestRun.executor.run_expect_fail(activate_cmd) - check_stderr_msg(output, error_activating_cache) - check_stderr_msg(output, invalid_core_volume_size) + check_string_msg_all(output.stderr, error_activating_cache) + check_string_msg_all(output.stderr, invalid_core_volume_size) with 
TestRun.step("Verify that the cache is in Standby detached state"): status = cache.get_status() @@ -281,7 +280,7 @@ def test_activate_neg_core_size(): with TestRun.step("Activate standby cache"): output = cache.standby_activate(device=cache_dev) - check_stdout_msg(output, cache_activated_successfully) + check_string_msg_all(output.stdout, cache_activated_successfully) with TestRun.step("Verify that the cache is in Running state"): status = cache.get_status() diff --git a/test/functional/tests/fault_injection/test_fault_injection_with_mounted_core.py b/test/functional/tests/fault_injection/test_fault_injection_with_mounted_core.py index 80987e7de..eb6f1aae3 100644 --- a/test/functional/tests/fault_injection/test_fault_injection_with_mounted_core.py +++ b/test/functional/tests/fault_injection/test_fault_injection_with_mounted_core.py @@ -107,11 +107,11 @@ def test_stop_cache_with_mounted_partition(): with TestRun.step("Try to remove core from cache."): output = TestRun.executor.run_expect_fail(cli.remove_core_cmd(cache_id=str(cache.cache_id), core_id=str(core.core_id))) - cli_messages.check_stderr_msg(output, cli_messages.remove_mounted_core) + cli_messages.check_string_msg_all(output.stderr, cli_messages.remove_mounted_core) with TestRun.step("Try to stop CAS."): output = TestRun.executor.run_expect_fail(cli.stop_cmd(cache_id=str(cache.cache_id))) - cli_messages.check_stderr_msg(output, cli_messages.stop_cache_mounted_core) + cli_messages.check_string_msg_all(output.stderr, cli_messages.stop_cache_mounted_core) with TestRun.step("Unmount core device."): core.unmount() diff --git a/test/functional/tests/fault_injection/test_fault_power_reboot.py b/test/functional/tests/fault_injection/test_fault_power_reboot.py index 22237e727..925863cd7 100644 --- a/test/functional/tests/fault_injection/test_fault_power_reboot.py +++ b/test/functional/tests/fault_injection/test_fault_power_reboot.py @@ -57,9 +57,9 @@ def test_fault_power_reboot(): output = 
TestRun.executor.run_expect_fail(cli.start_cmd( cache_dev=str(cache_dev.path), force=False, load=False)) - if cli_messages.check_stderr_msg(output, cli_messages.error_inserting_cache) and \ - cli_messages.check_stderr_msg(output, - cli_messages.reinitialize_with_force_or_recovery): + if cli_messages.check_string_msg_all(output.stderr, cli_messages.error_inserting_cache) and \ + cli_messages.check_string_msg_all(output.stderr, + cli_messages.reinitialize_with_force_or_recovery): TestRun.LOGGER.info(f"Found expected exception: {cli_messages.error_inserting_cache}" f" and {cli_messages.reinitialize_with_force_or_recovery}") @@ -86,7 +86,7 @@ def check_log(last_read_line, expected_message): cmd = f"tail -qn +{last_read_line} {log_path}" log = TestRun.executor.run(cmd) - if cli_messages.check_stdout_msg(log, expected_message): + if cli_messages.check_string_msg_all(log.stdout, expected_message): TestRun.LOGGER.info(f"Found expected message in log: {expected_message}") return True else: diff --git a/test/functional/tests/incremental_load/test_incremental_load.py b/test/functional/tests/incremental_load/test_incremental_load.py index 874a27035..211c06cbf 100644 --- a/test/functional/tests/incremental_load/test_incremental_load.py +++ b/test/functional/tests/incremental_load/test_incremental_load.py @@ -277,7 +277,7 @@ def test_load_cache_with_inactive_core(): with TestRun.step("Load cache."): output = TestRun.executor.run(cli.load_cmd(cache_dev.path)) - cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing) + cli_messages.check_string_msg_all(output.stderr, cli_messages.load_inactive_core_missing) with TestRun.step("Plug missing device and stop cache."): plug_device.plug() @@ -656,8 +656,8 @@ def test_remove_inactive_devices(): except CmdException as e: TestRun.LOGGER.info(f"Remove core operation is blocked for inactive CAS device " f"as expected. 
Force option set to: {force}") - cli_messages.check_stderr_msg( - e.output, cli_messages.remove_inactive_core_with_remove_command) + cli_messages.check_string_msg_all( + e.output.stderr, cli_messages.remove_inactive_core_with_remove_command) output = casadm.list_caches(output_format=OutputFormat.csv).stdout if core.core_device.path not in output: @@ -681,7 +681,7 @@ def test_remove_inactive_devices(): except CmdException as e: TestRun.LOGGER.info("Remove-inactive operation without force option is blocked for " "dirty CAS device as expected.") - cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_dirty_core) + cli_messages.check_string_msg_all(e.output.stderr, cli_messages.remove_inactive_dirty_core) output = casadm.list_caches(output_format=OutputFormat.csv).stdout if core.core_device.path not in output: TestRun.fail(f"CAS device is not listed in casadm list output but it should be." @@ -780,7 +780,7 @@ def try_stop_incomplete_cache(cache): cache.stop() except CmdException as e: TestRun.LOGGER.info("Stopping cache without 'no data flush' option is blocked as expected.") - cli_messages.check_stderr_msg(e.output, cli_messages.stop_cache_incomplete) + cli_messages.check_string_msg_all(e.output.stderr, cli_messages.stop_cache_incomplete) def check_inactive_usage_stats(stats_before, stats_after, stat_name, should_be_zero): diff --git a/test/functional/tests/initialize/test_negative_load.py b/test/functional/tests/initialize/test_negative_load.py index 973e3e6dc..0a6dbc285 100644 --- a/test/functional/tests/initialize/test_negative_load.py +++ b/test/functional/tests/initialize/test_negative_load.py @@ -101,13 +101,13 @@ def test_add_cached_core(): core_id=str(core.core_id), ) ) - cli_messages.check_stderr_msg(output, cli_messages.add_cached_core) + cli_messages.check_string_msg_all(output.stderr, cli_messages.add_cached_core) with TestRun.step("Try adding the same core device to the same cache for the second time."): output = 
TestRun.executor.run_expect_fail( cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.path)) ) - cli_messages.check_stderr_msg(output, cli_messages.already_cached_core) + cli_messages.check_string_msg_all(output.stderr, cli_messages.already_cached_core) with TestRun.step("Stop caches."): casadm.stop_all_caches() diff --git a/test/functional/tests/io_class/test_io_class_prevent_wrong_configuration.py b/test/functional/tests/io_class/test_io_class_prevent_wrong_configuration.py index c895b06b1..d0fbc30fc 100644 --- a/test/functional/tests/io_class/test_io_class_prevent_wrong_configuration.py +++ b/test/functional/tests/io_class/test_io_class_prevent_wrong_configuration.py @@ -150,7 +150,7 @@ def try_load_malformed_config(cache, config_io_classes, expected_err_msg): create_and_load_default_io_class_config(cache) except CmdException as e: TestRun.LOGGER.info(f"Open CAS did not load malformed config file as expected.") - cli_messages.check_stderr_msg(e.output, expected_err_msg) + cli_messages.check_string_msg_all(e.output.stderr, expected_err_msg) output_io_classes = cache.list_io_classes() if not IoClass.compare_ioclass_lists(output_io_classes, config_io_classes): output_str = "\n".join(str(i) for i in output_io_classes) diff --git a/test/functional/tests/lazy_writes/recovery/test_recovery_unplug.py b/test/functional/tests/lazy_writes/recovery/test_recovery_unplug.py index e212b91ea..938866cc3 100644 --- a/test/functional/tests/lazy_writes/recovery/test_recovery_unplug.py +++ b/test/functional/tests/lazy_writes/recovery/test_recovery_unplug.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # @@ -85,7 +85,7 @@ def test_recovery_unplug_cache_fs(cache_mode, cls, filesystem, direct): cache.stop(no_data_flush=True) TestRun.LOGGER.warning("Expected stopping cache with errors with --no-flush flag.") except CmdException as e1: - 
cli_messages.check_stderr_msg(e1.output, cli_messages.stop_cache_errors) + cli_messages.check_string_msg_all(e1.output.stderr, cli_messages.stop_cache_errors) with TestRun.step("Plug missing cache device."): TestRun.LOGGER.info(str(casadm.list_caches(by_id_path=False))) @@ -170,7 +170,7 @@ def test_recovery_unplug_cache_raw(cache_mode, cls): cache.stop(no_data_flush=True) TestRun.LOGGER.warning("Expected stopping cache with errors with --no-flush flag.") except CmdException as e1: - cli_messages.check_stderr_msg(e1.output, cli_messages.stop_cache_errors) + cli_messages.check_string_msg_all(e1.output.stderr, cli_messages.stop_cache_errors) with TestRun.step("Plug missing cache device."): TestRun.LOGGER.info(str(casadm.list_caches(by_id_path=False))) diff --git a/test/functional/tests/misc/test_device_capabilities.py b/test/functional/tests/misc/test_device_capabilities.py index 95e591d8c..decec852f 100644 --- a/test/functional/tests/misc/test_device_capabilities.py +++ b/test/functional/tests/misc/test_device_capabilities.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2020-2021 Intel Corporation +# Copyright(c) 2020-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # @@ -22,67 +22,91 @@ @pytest.mark.require_plugin("scsi_debug") def test_device_capabilities(): """ - title: Test whether CAS device capabilities are properly set. - description: | - Test if CAS device takes into consideration differences between devices which are used to - create it. - pass_criteria: - - CAS device starts successfully using differently configured devices. - - CAS device capabilities are as expected. + title: Test whether CAS device capabilities are properly set. + description: | + Test if CAS device takes into consideration differences between devices which are used to + create it. + pass_criteria: + - CAS device starts successfully using differently configured devices. + - CAS device capabilities are as expected. 
""" - core_device = TestRun.disks['core'] - max_io_size_path = os.path.join(disk_utils.get_sysfs_path(core_device.get_device_id()), - 'queue/max_sectors_kb') + core_device = TestRun.disks["core"] + max_io_size_path = os.path.join( + disk_utils.get_sysfs_path(core_device.get_device_id()), "queue/max_sectors_kb" + ) default_max_io_size = fs_utils.read_file(max_io_size_path) iteration_settings = [ - {"device": "SCSI-debug module", - "dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 1024}, - {"device": "SCSI-debug module", - "dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 256}, - {"device": "SCSI-debug module", - "dev_size_mb": 1024, "logical_block_size": 512, "max_sectors_kb": 128}, - {"device": "SCSI-debug module", - "dev_size_mb": 2048, "logical_block_size": 2048, "max_sectors_kb": 1024}, - {"device": "standard core device", - "max_sectors_kb": int(default_max_io_size)}, - {"device": "standard core device", "max_sectors_kb": 128} + { + "device": "SCSI-debug module", + "dev_size_mb": 1024, + "logical_block_size": 512, + "max_sectors_kb": 1024, + }, + { + "device": "SCSI-debug module", + "dev_size_mb": 1024, + "logical_block_size": 512, + "max_sectors_kb": 256, + }, + { + "device": "SCSI-debug module", + "dev_size_mb": 1024, + "logical_block_size": 512, + "max_sectors_kb": 128, + }, + { + "device": "SCSI-debug module", + "dev_size_mb": 2048, + "logical_block_size": 2048, + "max_sectors_kb": 1024, + }, + {"device": "standard core device", "max_sectors_kb": int(default_max_io_size)}, + {"device": "standard core device", "max_sectors_kb": 128}, ] for i in range(0, len(iteration_settings)): device = iteration_settings[i]["device"] group_title = f"{device} | " if device == "SCSI-debug module": - group_title += f"dev_size_mb = {iteration_settings[i]['dev_size_mb']} | " \ - f"logical_block_size = {iteration_settings[i]['logical_block_size']} | " + group_title += ( + f"dev_size_mb = {iteration_settings[i]['dev_size_mb']} | " + 
f"logical_block_size = {iteration_settings[i]['logical_block_size']} | " + ) group_title += f"max_sectors_kb = {iteration_settings[i]['max_sectors_kb']}" with TestRun.group(group_title): with TestRun.step("Prepare devices."): core_device = prepare_core_device(iteration_settings[i]) - cache_device = TestRun.disks['cache'] + cache_device = TestRun.disks["cache"] with TestRun.step("Start cache and add prepared core device as core."): cache, core, error_output = prepare_cas_device(cache_device, core_device) - with TestRun.step("Compare capabilities for CAS device, cache and core " - "(or check proper error if logical sector mismatch occurs)."): + with TestRun.step( + "Compare capabilities for CAS device, cache and core " + "(or check proper error if logical sector mismatch occurs)." + ): compare_capabilities(cache_device, core_device, cache, core, error_output) with TestRun.step("Recreate CAS device with switched cache and core devices."): cache, core, error_output = prepare_cas_device(core_device, cache_device) - with TestRun.step("Compare capabilities for CAS device, cache and core " - "(or check proper error if logical sector mismatch occurs)."): + with TestRun.step( + "Compare capabilities for CAS device, cache and core " + "(or check proper error if logical sector mismatch occurs)." 
+ ): compare_capabilities(core_device, cache_device, cache, core, error_output) # Methods used in test + def prepare_core_device(settings): if settings["device"] == "SCSI-debug module": core_device = create_scsi_debug_device( - settings["logical_block_size"], 4, settings["dev_size_mb"]) + settings["logical_block_size"], 4, settings["dev_size_mb"] + ) else: - core_device = TestRun.disks['core'] + core_device = TestRun.disks["core"] core_device.set_max_io_size(Size(settings["max_sectors_kb"], Unit.KibiByte)) return core_device @@ -93,9 +117,9 @@ def create_scsi_debug_device(sector_size: int, physblk_exp: int, dev_size_mb=102 "virtual_gb": "200", "dev_size_mb": str(dev_size_mb), "sector_size": str(sector_size), - "physblk_exp": str(physblk_exp) + "physblk_exp": str(physblk_exp), } - scsi_debug = TestRun.plugin_manager.get_plugin('scsi_debug') + scsi_debug = TestRun.plugin_manager.get_plugin("scsi_debug") scsi_debug.params = scsi_debug_params scsi_debug.reload() return TestRun.scsi_debug_devices[0] @@ -110,13 +134,16 @@ def prepare_cas_device(cache_device, core_device): if cache_dev_bs > core_dev_bs: TestRun.LOGGER.error( f"CAS device started with cache device logical block size ({cache_dev_bs}) " - f"greater than core device logical block size ({core_dev_bs})") + f"greater than core device logical block size ({core_dev_bs})" + ) return cache, core, None except CmdException as e: if cache_dev_bs <= core_dev_bs: TestRun.fail("Failed to create CAS device.") - TestRun.LOGGER.info("Cannot add core device with mismatching logical sector size. " - "Check output instead of capabilities.") + TestRun.LOGGER.info( + "Cannot add core device with mismatching logical sector size. " + "Check output instead of capabilities." 
+ ) return cache, None, e.output @@ -132,23 +159,26 @@ def method_lcm_not_zero(a, b): # device capabilities and their test comparison methods -capabilities = {"logical_block_size": max, - "max_hw_sectors_kb": None, - "max_integrity_segments": method_min_not_zero, - "max_sectors_kb": None, - "max_segments": None, - "minimum_io_size": max, - "optimal_io_size": method_lcm_not_zero, - "physical_block_size": max, - "write_same_max_bytes": min} +capabilities = { + "logical_block_size": max, + "max_hw_sectors_kb": None, + "max_integrity_segments": method_min_not_zero, + "max_sectors_kb": None, + "max_segments": None, + "minimum_io_size": max, + "optimal_io_size": method_lcm_not_zero, + "physical_block_size": max, + "write_same_max_bytes": min, +} def measure_capabilities(dev): dev_capabilities = {} - dev_id = dev.parent_device.get_device_id() if isinstance(dev, Partition) \ - else dev.get_device_id() + dev_id = ( + dev.parent_device.get_device_id() if isinstance(dev, Partition) else dev.get_device_id() + ) for c in capabilities: - path = os.path.join(disk_utils.get_sysfs_path(dev_id), 'queue', c) + path = os.path.join(disk_utils.get_sysfs_path(dev_id), "queue", c) command = f"cat {path}" output = TestRun.executor.run(command) if output.exit_code == 0: @@ -161,17 +191,18 @@ def measure_capabilities(dev): def compare_capabilities(cache_device, core_device, cache, core, msg): if core is None: - cli_messages.check_stderr_msg(msg, - cli_messages.try_add_core_sector_size_mismatch) + cli_messages.check_string_msg_all(msg.stderr, cli_messages.try_add_core_sector_size_mismatch) else: - core_dev_sectors_num = \ - disk_utils.get_size(core_device.get_device_id()) / disk_utils.get_block_size( - core_device.get_device_id()) + core_dev_sectors_num = disk_utils.get_size( + core_device.get_device_id() + ) / disk_utils.get_block_size(core_device.get_device_id()) core_sectors_num = disk_utils.get_size(core.get_device_id()) / disk_utils.get_block_size( - core.get_device_id()) + 
core.get_device_id() + ) if core_dev_sectors_num != core_sectors_num: TestRun.LOGGER.error( - "Number of sectors in CAS device and attached core device is different.") + "Number of sectors in CAS device and attached core device is different." + ) cache.stop() return cas_capabilities = measure_capabilities(core) @@ -198,10 +229,14 @@ def compare_capabilities(cache_device, core_device, cache, core, msg): expected_val = new_expected_val // 2 if expected_val != cas_val: - TestRun.LOGGER.error(f"Cas device {capability} is not set properly. Is: {cas_val}, " - f"should be {expected_val} (cache: {cache_val}, " - f"core: {core_val})") + TestRun.LOGGER.error( + f"Cas device {capability} is not set properly. Is: {cas_val}, " + f"should be {expected_val} (cache: {cache_val}, " + f"core: {core_val})" + ) continue - TestRun.LOGGER.info(f"Cas device {capability} has proper value: {cas_val} " - f"(cache: {cache_val}, core: {core_val})") + TestRun.LOGGER.info( + f"Cas device {capability} has proper value: {cas_val} " + f"(cache: {cache_val}, core: {core_val})" + ) cache.stop() diff --git a/test/functional/tests/stats/test_ioclass_stats.py b/test/functional/tests/stats/test_ioclass_stats.py index 5f0396366..9f07a04fc 100644 --- a/test/functional/tests/stats/test_ioclass_stats.py +++ b/test/functional/tests/stats/test_ioclass_stats.py @@ -1,5 +1,5 @@ # -# Copyright(c) 2019-2021 Intel Corporation +# Copyright(c) 2019-2022 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # import random @@ -11,9 +11,9 @@ from api.cas.cache_config import CleaningPolicy, CacheMode, CacheLineSize from api.cas.casadm import StatsFilter from api.cas.cli_messages import ( - check_stderr_msg, + check_string_msg_all, get_stats_ioclass_id_not_configured, - get_stats_ioclass_id_out_of_range + get_stats_ioclass_id_out_of_range, ) from api.cas.statistics import ( config_stats_ioclass, @@ -21,7 +21,7 @@ usage_stats_ioclass, request_stats, block_stats_core, - block_stats_cache + block_stats_cache, ) from 
core.test_run import TestRun from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan @@ -42,13 +42,13 @@ @pytest.mark.parametrize("random_cls", [random.choice(list(CacheLineSize))]) def test_ioclass_stats_basic(random_cls): """ - title: Basic test for retrieving IO class statistics. - description: | - Check if statistics are retrieved only for configured IO classes. - pass_criteria: - - Statistics are retrieved for configured IO classes. - - Error is displayed when retrieving statistics for non-configured IO class. - - Error is displayed when retrieving statistics for out of range IO class id. + title: Basic test for retrieving IO class statistics. + description: | + Check if statistics are retrieved only for configured IO classes. + pass_criteria: + - Statistics are retrieved for configured IO classes. + - Error is displayed when retrieving statistics for non-configured IO class. + - Error is displayed when retrieving statistics for out of range IO class id. """ min_ioclass_id = 11 @@ -60,40 +60,41 @@ def test_ioclass_stats_basic(random_cls): with TestRun.step("Prepare IO class config file"): ioclass_list = [] for class_id in range(min_ioclass_id, max_ioclass_id): - ioclass_list.append(IoClass( - class_id=class_id, - rule=f"file_size:le:{4096 * class_id}&done", - priority=22 - )) + ioclass_list.append( + IoClass(class_id=class_id, rule=f"file_size:le:{4096 * class_id}&done", priority=22) + ) IoClass.save_list_to_config_file(ioclass_list, True) with TestRun.step("Load IO class config file"): casadm.load_io_classes(cache_id, file=ioclass_config.default_config_file_path) - with TestRun.step("Try retrieving IO class stats for all allowed id values " - "and one out of range id"): + with TestRun.step( + "Try retrieving IO class stats for all allowed id values " "and one out of range id" + ): for class_id in range(ioclass_config.MAX_IO_CLASS_ID + 2): out_of_range = " out of range" if class_id > ioclass_config.MAX_IO_CLASS_ID else "" with 
TestRun.group(f"Checking{out_of_range} IO class id {class_id}..."): expected = class_id == 0 or class_id in range(min_ioclass_id, max_ioclass_id) try: casadm.print_statistics( - cache_id=cache_id, - io_class_id=class_id, - per_io_class=True) + cache_id=cache_id, io_class_id=class_id, per_io_class=True + ) if not expected: TestRun.LOGGER.error( - f"Stats retrieved for not configured IO class {class_id}") + f"Stats retrieved for not configured IO class {class_id}" + ) except CmdException as e: if expected: TestRun.LOGGER.error(f"Stats not retrieved for IO class id: {class_id}") elif class_id <= ioclass_config.MAX_IO_CLASS_ID: - if not check_stderr_msg(e.output, get_stats_ioclass_id_not_configured): + if not check_string_msg_all(e.output.stderr, get_stats_ioclass_id_not_configured): TestRun.LOGGER.error( - f"Wrong message for unused IO class id: {class_id}") - elif not check_stderr_msg(e.output, get_stats_ioclass_id_out_of_range): + f"Wrong message for unused IO class id: {class_id}" + ) + elif not check_string_msg_all(e.output.stderr, get_stats_ioclass_id_out_of_range): TestRun.LOGGER.error( - f"Wrong message for out of range IO class id: {class_id}") + f"Wrong message for out of range IO class id: {class_id}" + ) @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @@ -101,12 +102,12 @@ def test_ioclass_stats_basic(random_cls): @pytest.mark.parametrize("random_cls", [random.choice(list(CacheLineSize))]) def test_ioclass_stats_sum(random_cls): """ - title: Test for sum of IO class statistics. - description: | - Check if statistics for configured IO classes sum up to cache/core statistics. - pass_criteria: - - Per class cache IO class statistics sum up to cache statistics. - - Per class core IO class statistics sum up to core statistics. + title: Test for sum of IO class statistics. + description: | + Check if statistics for configured IO classes sum up to cache/core statistics. 
+ pass_criteria: + - Per class cache IO class statistics sum up to cache statistics. + - Per class core IO class statistics sum up to core statistics. """ min_ioclass_id = 1 @@ -120,11 +121,13 @@ def test_ioclass_stats_sum(random_cls): with TestRun.step("Prepare IO class config file"): ioclass_list = [] for class_id in range(min_ioclass_id, max_ioclass_id): - ioclass_list.append(IoClass( - class_id=class_id, - rule=f"file_size:le:{file_size_base * class_id}&done", - priority=22 - )) + ioclass_list.append( + IoClass( + class_id=class_id, + rule=f"file_size:le:{file_size_base * class_id}&done", + priority=22, + ) + ) IoClass.save_list_to_config_file(ioclass_list, True) with TestRun.step("Load IO class config file"): @@ -213,35 +216,38 @@ def test_ioclass_stats_sum(random_cls): @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) -@pytest.mark.parametrize("stat_filter", [StatsFilter.req, StatsFilter.usage, StatsFilter.conf, - StatsFilter.blk]) +@pytest.mark.parametrize( + "stat_filter", [StatsFilter.req, StatsFilter.usage, StatsFilter.conf, StatsFilter.blk] +) @pytest.mark.parametrize("per_core", [True, False]) @pytest.mark.parametrize("random_cls", [random.choice(list(CacheLineSize))]) def test_ioclass_stats_sections(stat_filter, per_core, random_cls): """ - title: Test for cache/core IO class statistics sections. - description: | - Check if IO class statistics sections for cache/core print all required entries and - no additional ones. - pass_criteria: - - Section statistics contain all required entries. - - Section statistics do not contain any additional entries. + title: Test for cache/core IO class statistics sections. + description: | + Check if IO class statistics sections for cache/core print all required entries and + no additional ones. + pass_criteria: + - Section statistics contain all required entries. + - Section statistics do not contain any additional entries. 
""" with TestRun.step("Test prepare"): caches, cores = prepare(random_cls, cache_count=4, cores_per_cache=3) - with TestRun.step(f"Validate displayed {stat_filter.name} statistics for default IO class for " - f"{'cores' if per_core else 'caches'}"): + with TestRun.step( + f"Validate displayed {stat_filter.name} statistics for default IO class for " + f"{'cores' if per_core else 'caches'}" + ): for cache in caches: with TestRun.group(f"Cache {cache.cache_id}"): for core in cache.get_core_devices(): if per_core: TestRun.LOGGER.info(f"Core {core.cache_id}-{core.core_id}") statistics = ( - core.get_statistics_flat( - io_class_id=0, stat_filter=[stat_filter]) if per_core - else cache.get_statistics_flat( - io_class_id=0, stat_filter=[stat_filter])) + core.get_statistics_flat(io_class_id=0, stat_filter=[stat_filter]) + if per_core + else cache.get_statistics_flat(io_class_id=0, stat_filter=[stat_filter]) + ) validate_statistics(statistics, stat_filter, per_core) if not per_core: break @@ -252,8 +258,10 @@ def test_ioclass_stats_sections(stat_filter, per_core, random_cls): IoClass.save_list_to_config_file(random_list, add_default_rule=False) cache.load_io_class(ioclass_config.default_config_file_path) - with TestRun.step(f"Validate displayed {stat_filter.name} statistics for every configured IO " - f"class for all {'cores' if per_core else 'caches'}"): + with TestRun.step( + f"Validate displayed {stat_filter.name} statistics for every configured IO " + f"class for all {'cores' if per_core else 'caches'}" + ): for cache in caches: with TestRun.group(f"Cache {cache.cache_id}"): for core in cache.get_core_devices(): @@ -261,16 +269,22 @@ def test_ioclass_stats_sections(stat_filter, per_core, random_cls): for class_id in range(ioclass_config.MAX_IO_CLASS_ID + 1): with TestRun.group(core_info + f"IO class id {class_id}"): statistics = ( - core.get_statistics_flat(class_id, [stat_filter]) if per_core - else cache.get_statistics_flat(class_id, [stat_filter])) + 
core.get_statistics_flat(class_id, [stat_filter]) + if per_core + else cache.get_statistics_flat(class_id, [stat_filter]) + ) validate_statistics(statistics, stat_filter, per_core) if stat_filter == StatsFilter.conf: # no percentage statistics for conf continue statistics_percents = ( core.get_statistics_flat( - class_id, [stat_filter], percentage_val=True) if per_core + class_id, [stat_filter], percentage_val=True + ) + if per_core else cache.get_statistics_flat( - class_id, [stat_filter], percentage_val=True)) + class_id, [stat_filter], percentage_val=True + ) + ) validate_statistics(statistics_percents, stat_filter, per_core) if not per_core: break @@ -301,8 +315,8 @@ def prepare(random_cls, cache_count=1, cores_per_cache=1): cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO] ioclass_config.remove_ioclass_config() - cache_device = TestRun.disks['cache'] - core_device = TestRun.disks['core'] + cache_device = TestRun.disks["cache"] + core_device = TestRun.disks["core"] cache_device.create_partitions([Size(500, Unit.MebiByte)] * cache_count) core_device.create_partitions([Size(2, Unit.GibiByte)] * cache_count * cores_per_cache) @@ -316,16 +330,14 @@ def prepare(random_cls, cache_count=1, cores_per_cache=1): caches, cores = [], [] for i, cache_device in enumerate(cache_devices): TestRun.LOGGER.info(f"Starting cache on {cache_device.path}") - cache = casadm.start_cache(cache_device, - force=True, - cache_mode=cache_modes[i], - cache_line_size=random_cls) + cache = casadm.start_cache( + cache_device, force=True, cache_mode=cache_modes[i], cache_line_size=random_cls + ) caches.append(cache) TestRun.LOGGER.info("Setting cleaning policy to NOP") cache.set_cleaning_policy(CleaningPolicy.nop) - for core_device in core_devices[i * cores_per_cache:(i + 1) * cores_per_cache]: - TestRun.LOGGER.info( - f"Adding core device {core_device.path} to cache {cache.cache_id}") + for core_device in core_devices[i * cores_per_cache : (i + 1) * cores_per_cache]: + 
TestRun.LOGGER.info(f"Adding core device {core_device.path} to cache {cache.cache_id}") core = cache.add_core(core_dev=core_device) core.reset_counters() cores.append(core)