#
# Copyright(c) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

import re
from itertools import cycle

import pytest

from api.cas import casadm, ioclass_config
from api.cas.cache_config import CacheMode, CleaningPolicy, SeqCutOffPolicy
from api.cas.casadm_params import StatsFilter
from api.cas.ioclass_config import IoClass
from core.test_run_utils import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import IoEngine, ReadWrite
from test_utils.os_utils import sync, drop_caches, Udev
from test_utils.size import Size, Unit

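# Test topology: num_of_caches cache instances, each with cores_per_cache exported cores.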
num_of_caches = 4
cores_per_cache = 3
num_of_cores = num_of_caches * cores_per_cache


@pytest.mark.parametrize("per_core", [False, True])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_io_class_stats_core_cache(per_core):
    """
    title: Open CAS statistics values for IO classes - per core/cache.
    description: Check Open CAS ability to display correct values in statistics
        for all supported IO classes for a given core/cache device.
    pass_criteria:
      - proper statistics after fio
      - statistics do not change after stopping and loading caches
    """

    with TestRun.step("Prepare devices."):
        cache_device = TestRun.disks['cache']
        cache_device.create_partitions([Size(20, Unit.GibiByte)] * num_of_caches)
        cache_devices = cache_device.partitions

        core_device = TestRun.disks['core']
        core_device.create_partitions([Size(10, Unit.GibiByte)] * num_of_cores)
        core_devices = core_device.partitions

    with TestRun.step("Start caches (one for each supported cache mode) and add core devices."):
        caches = [casadm.start_cache(dev, cache_mode=cache_mode, force=True)
                  for dev, cache_mode in zip(cache_devices, CacheMode)]

        cores = []
        for i, cache in zip(range(0, num_of_cores, cores_per_cache), caches):
            cores.extend([cache.add_core(dev) for dev in core_devices[i:i+cores_per_cache]])

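        # Normalize cache state so fio results are deterministic:
        # no background cleaning, no sequential cut-off, empty cache, zeroed counters.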
        for cache in caches:
            cache.set_cleaning_policy(CleaningPolicy.nop)
            cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
            cache.purge_cache()
            cache.reset_counters()

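        # Disable udev to keep automatically triggered IO from polluting the statistics.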
        Udev.disable()

    with TestRun.step(f"Validate IO class usage statistics after start "
                      f"for each {'core' if per_core else 'cache'}."):
        devices = cores if per_core else caches
        for dev in devices:
            stats = dev.get_statistics_flat(0, [StatsFilter.usage])
            TestRun.LOGGER.info(f"Check stats for cache {dev.cache_id} "
                                f"{f'core {dev.core_id}' if per_core else ''}")
            for name, value in stats.items():
                check_value(name, value.get_value(), 0)

    with TestRun.step("Load IO class configuration file for all caches."):
        io_classes = IoClass.csv_to_list(
            fs_utils.read_file("/etc/opencas/ioclass-config.csv"))
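        # Disable allocation for the metadata class so that filesystem metadata IO
        # does not pollute the per-class statistics checked below.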
        for io_class in io_classes:
            if 'metadata' in io_class.rule:
                io_class.allocation = 0
        IoClass.save_list_to_config_file(io_classes, add_default_rule=False)
        for cache in caches:
            cache.load_io_class(ioclass_config.default_config_file_path)

    with TestRun.step("Make filesystems on Open CAS devices and mount them."):
        for core, fs in zip(cores, cycle(Filesystem)):
            mount_point = core.path.replace('/dev/', '/mnt/')
            core.create_filesystem(fs)
            core.mount(mount_point)
        sync()
        drop_caches()

    with TestRun.step("Run fio for each device and validate IO class usage, "
                      "request and block level statistics values depending on cache mode."):
        saved_stats = []
        sizes = get_sizes(io_classes)
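        # Skip the first two IO classes (unclassified and metadata); each remaining
        # class is exercised on a separate core with a file size matching its rule.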
        for io_class, core, file_size in zip(io_classes[2:], cores, sizes):
            cache_id = core.cache_id
            cache = caches[cache_id - 1]
            cache_mode = cache.get_cache_mode()

            core.reset_counters()
            cache.purge_cache()
            drop_caches()

            with TestRun.step(f"Testing cache {cache_id} core {core.core_id} "
                              f"with IO class {io_class.id}. "
                              f"Cache mode: {cache_mode}"):

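                # fio writes file_size bytes in 4 KiB requests; statistics below are
                # compared in 4 KiB blocks.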
                size_in_blocks = round(file_size.get_value(Unit.Blocks4096))

                TestRun.LOGGER.info("Run fio.")
                # Only the 'direct' IO class (id 22 in the default configuration)
                # is exercised with direct IO.
                fio = fio_params(core, file_size, direct=(io_class.id == 22))
                fio.run()
                sync()
                drop_caches()

                TestRun.LOGGER.info("Check statistics.")
                dev = core if per_core else cache
                stats = dev.get_statistics_flat(
                    io_class.id, [StatsFilter.usage, StatsFilter.req, StatsFilter.blk])
                stats_perc = dev.get_statistics_flat(io_class.id, [StatsFilter.usage],
                                                     percentage_val=True)

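                # Cache-level statistics cover multiple cores, hence the "(s)" suffix
                # in some of their flat statistic names.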
                s = '' if per_core else '(s)'
                stats_pt_wa = [f'writes to exported object{s}', f'total to/from exported object{s}',
                               f'writes to core{s}', f'total to/from core{s}']
                stats_wb = ['occupancy', 'dirty', 'write full misses', 'write total',
                            f'writes to exported object{s}', f'total to/from exported object{s}',
                            'writes to cache', 'total to/from cache']
                stats_wt = ['occupancy', 'clean', 'write full misses', 'write total',
                            f'writes to exported object{s}', f'total to/from exported object{s}',
                            'writes to cache', 'total to/from cache',
                            f'writes to core{s}', f'total to/from core{s}']

                # TODO: need proper values for pass-through reads, pass-through writes,
                # serviced requests, total requests and check correctness of other values

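                # Expected values depend on the cache mode: PT and WA writes bypass the
                # cache, WB leaves the written data dirty in cache, WT writes the data
                # to both cache and core.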
                for name, value in stats.items():
                    value = round(value) if isinstance(value, float) \
                        else round(value.get_value(Unit.Blocks4096))

                    if cache_mode in (CacheMode.PT, CacheMode.WA):
                        expected_value = size_in_blocks if name in stats_pt_wa else 0
                        check_value(name, value, expected_value)

                    elif cache_mode == CacheMode.WB:
                        expected_value = size_in_blocks if name in stats_wb else 0
                        check_value(name, value, expected_value)

                    elif cache_mode == CacheMode.WT:
                        expected_value = size_in_blocks if name in stats_wt else 0
                        check_value(name, value, expected_value)

                for name, value in stats_perc.items():
                    if cache_mode == CacheMode.PT:
                        expected_value = 0
                        epsilon_percentage = 0
                        check_perc_value(name, value, expected_value, epsilon_percentage)

                    elif cache_mode == CacheMode.WA:
                        expected_value = 0
                        epsilon_percentage = 0.5 if name == 'occupancy' else 0
                        check_perc_value(name, value, expected_value, epsilon_percentage)

                    elif cache_mode == CacheMode.WB:
                        # Expected occupancy: written 4 KiB blocks as a percentage
                        # of the total number of 4 KiB cache blocks.
                        occupancy = 100 * size_in_blocks / cache.size.get_value(Unit.Blocks4096)
                        expected_value = 100 if name == 'dirty' else \
                            occupancy if name == 'occupancy' else 0
                        epsilon_percentage = 0.5 if name in ('dirty', 'occupancy') else 0
                        check_perc_value(name, value, expected_value, epsilon_percentage)

                    elif cache_mode == CacheMode.WT:
                        occupancy = 100 * size_in_blocks / cache.size.get_value(Unit.Blocks4096)
                        expected_value = 100 if name == 'clean' else \
                            occupancy if name == 'occupancy' else 0
                        epsilon_percentage = 0.5 if name in ('clean', 'occupancy') else 0
                        check_perc_value(name, value, expected_value, epsilon_percentage)

                saved_stats.append(dev.get_statistics_flat(io_class.id,
                                                           [StatsFilter.conf, StatsFilter.usage]))

    with TestRun.step("Stop caches and load them back."):
        for core in cores:
            core.unmount()
        casadm.stop_all_caches()
        caches = [casadm.load_cache(device) for device in cache_devices]

    with TestRun.step(f"Validate IO class statistics per {'core' if per_core else 'cache'} - "
                      f"they shall be the same as before stopping."):
        stats = []
        for io_class, core in zip(io_classes[2:], cores):
            cache_id = core.cache_id
            cache = caches[cache_id - 1]
            dev = core if per_core else cache
            stats.append(dev.get_statistics_flat(io_class.id,
                                                 [StatsFilter.conf, StatsFilter.usage]))

        for saved_stat, stat, core, io_class in zip(saved_stats, stats, cores, io_classes[2:]):
            TestRun.LOGGER.info(f"Testing cache {core.cache_id} core {core.core_id} "
                                f"with IO class {io_class.id}.")
            for name, saved_value, value in zip(stat.keys(), saved_stat.values(), stat.values()):
                value = round(value.get_value(Unit.Blocks4096)) if isinstance(value, Size) else value
                saved_value = round(saved_value.get_value(Unit.Blocks4096)) \
                    if isinstance(saved_value, Size) else saved_value
                check_value(name, value, saved_value)

    with TestRun.step("Sum all usage statistics values (except free) over cores and "
                      "compare them with the cache statistics."):
        for cache in caches:
            TestRun.LOGGER.info(f"Check stats for cache {cache.cache_id}.")
            cores_usage_stats = [core.get_statistics().usage_stats for core in cores
                                 if core.cache_id == cache.cache_id]
            occupancy = sum(stats.occupancy for stats in cores_usage_stats)
            dirty = sum(stats.dirty for stats in cores_usage_stats)
            clean = sum(stats.clean for stats in cores_usage_stats)
            cores_stats = [occupancy, dirty, clean]

            cache_usage_stats = cache.get_statistics().usage_stats
            cache_stats = [cache_usage_stats.occupancy, cache_usage_stats.dirty,
                           cache_usage_stats.clean]

            for name, cores_sum, cache_stat in zip(
                    ('occupancy', 'dirty', 'clean'), cores_stats, cache_stats):
                check_value(name, cores_sum, cache_stat)


def get_sizes(io_classes):
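    """
    Derive a file size for each IO class from the first number in its rule.

    The first two classes (unclassified, metadata) are skipped; the two sizes appended
    at the end are meant for the last two classes - assumed to be the open-ended
    file size class and the direct IO class of the default configuration.
    """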
    sizes = [Size(int(re.search(r"\d+", io_class.rule).group()), Unit.Byte)
             for io_class in io_classes[2:-2]]
    sizes.extend([sizes[-1] + Size(100, Unit.MebiByte), Size(1, Unit.Blocks4096)])

    return sizes


def check_value(name, actual_value, expected_value):
    if actual_value != expected_value:
        TestRun.LOGGER.error(f"Bad {name} value. "
                             f"Expected: {expected_value}, actual: {actual_value}.")
    else:
        TestRun.LOGGER.info(f"Proper {name} value: {actual_value}.")


def check_perc_value(name, actual_value, expected_value, epsilon_percentage):
    if abs(expected_value - actual_value) > epsilon_percentage:
        TestRun.LOGGER.error(f"Bad {name} percentage value. "
                             f"Expected: {expected_value}, actual: {actual_value}.")
    else:
        TestRun.LOGGER.info(f"Proper {name} percentage value: {actual_value}.")


def fio_params(core, size, direct=False):
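    """Prepare a single-job sequential write of `size`, issued in 4 KiB requests,
    to a test file on the mounted core device."""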
    name = f"{core.mount_point}/{round(size.get_value())}{'_direct' if direct else ''}"
    fio = Fio().create_command() \
        .io_engine(IoEngine.libaio) \
        .read_write(ReadWrite.write) \
        .io_depth(1) \
        .block_size(Size(1, Unit.Blocks4096)) \
        .num_jobs(1) \
        .direct(direct) \
        .file_size(size) \
        .target(name)

    return fio