Commit edc729b

Author: Robert Baldyga
Merge pull request #475 from katlapinka/trim
Add trim test using devices supporting and not supporting discards
2 parents 96ce313 + 85df47c commit edc729b

File tree

3 files changed: +254 −1 lines changed


ocf

test/functional/tests/io/trim/__init__.py

Whitespace-only changes.
Lines changed: 253 additions & 0 deletions
@@ -0,0 +1,253 @@
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
import re
import time

import pytest

from api.cas import casadm
from api.cas.cache_config import CacheMode, CacheModeTrait, CleaningPolicy
from core.test_run import TestRun
from storage_devices.disk import DiskType, DiskTypeSet
from test_tools import fs_utils
from test_tools.blktrace import BlkTrace, BlkTraceMask, RwbsKind
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils import os_utils
from test_utils.size import Size, Unit


@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
23+
def test_trim_start_discard():
24+
"""
25+
title: Check discarding cache device at cache start
26+
description: |
27+
Create 2 partitions on trim-supporting device, write pattern to both partitions,
28+
start blktrace against first one, start cache on first partition and check if discard
29+
requests were sent at all and only to the first partition.
30+
pass_criteria:
31+
- Partition used for cache is discarded.
32+
- Second partition is untouched - written pattern is preserved.
33+
"""
34+
with TestRun.step("Clearing dmesg"):
35+
TestRun.executor.run_expect_success("dmesg -C")
36+
37+
with TestRun.step("Preparing cache device"):
38+
dev = TestRun.disks['cache']
39+
dev.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
40+
cas_part = dev.partitions[0]
41+
non_cas_part = dev.partitions[1]
42+
43+
with TestRun.step("Writing different pattern on partitions"):
44+
cas_fio = write_pattern(cas_part.system_path)
45+
non_cas_fio = write_pattern(non_cas_part.system_path)
46+
cas_fio.run()
47+
non_cas_fio.run()
48+
49+
# TODO add blktracing for non-cas part
50+
with TestRun.step("Starting blktrace against first (cache) partition"):
51+
blktrace = BlkTrace(cas_part, BlkTraceMask.discard)
52+
blktrace.start_monitoring()
53+
54+
with TestRun.step("Starting cache"):
55+
cache = casadm.start_cache(cas_part, force=True)
56+
metadata_size = get_metadata_size_from_dmesg()
57+
58+
with TestRun.step("Stop blktrace and check if discard requests were issued"):
59+
cache_reqs = blktrace.stop_monitoring()
60+
cache_part_start = cas_part.begin
61+
62+
# CAS should discard cache device during cache start
63+
if len(cache_reqs) == 0:
64+
TestRun.fail("No discard requests issued to the cas partition!")
65+
66+
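        # Each traced discard request is expected to start right past the metadata
        # area and span the remainder of the cache partition (checked request by
        # request below).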
        non_meta_sector = (cache_part_start + metadata_size).get_value(Unit.Blocks512)
        non_meta_size = (cas_part.size - metadata_size).get_value(Unit.Byte)
        for req in cache_reqs:
            if req.sector_number != non_meta_sector:
                TestRun.fail(f"Discard request issued to wrong sector: {req.sector_number}, "
                             f"expected: {non_meta_sector}")
            if req.byte_count != non_meta_size:
                TestRun.fail(f"Discard request issued with wrong byte count: {req.byte_count}, "
                             f"expected: {non_meta_size} bytes")

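        # Re-read both partitions: past the metadata area the cache partition
        # should now read back as zeroes, while the second partition must still
        # hold the originally written pattern.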
        cas_fio.read_write(ReadWrite.read)
        non_cas_fio.read_write(ReadWrite.read)
        cas_fio.verification_with_pattern("0x00")
        cas_fio.offset(metadata_size)
        cas_fio.run()
        non_cas_fio.run()

    with TestRun.step("Stopping cache"):
        cache.stop()


@pytest.mark.parametrizex("cache_mode", CacheMode.with_traits(CacheModeTrait.InsertWrite))
88+
@pytest.mark.parametrizex("filesystem", Filesystem)
89+
@pytest.mark.parametrizex("cleaning_policy", CleaningPolicy)
90+
@pytest.mark.parametrizex("trim_support_cache_core", [(False, True), (True, False), (True, True)])
91+
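# trim_support_cache_core is a (cache supports TRIM, core supports TRIM) pair;
# an SSD partition is used where support is expected, an HDD partition otherwise.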
@pytest.mark.require_disk("ssd1", DiskTypeSet([DiskType.optane, DiskType.nand]))
92+
@pytest.mark.require_disk("ssd2", DiskTypeSet([DiskType.optane, DiskType.nand]))
93+
@pytest.mark.require_disk("hdd", DiskTypeSet([DiskType.hdd, DiskType.hdd4k]))
94+
def test_trim_device_discard_support(
95+
trim_support_cache_core, cache_mode, filesystem, cleaning_policy):
96+
"""
97+
title: Trim requests supported on various cache and core devices.
98+
description: |
99+
Handling trim requests support when various combination of SSD and HDD are used as
100+
cache and core.
101+
pass_criteria:
102+
- No system crash.
103+
- Discards detected on CAS.
104+
- Discards detected on SSD device when it is used as core.
105+
- Discards not detected on HDD device used as cache or core.
106+
- Discards not detected on cache device.
107+
"""
108+
109+
    mount_point = "/mnt"

    with TestRun.step("Create partitions on SSD and HDD devices."):
        TestRun.disks["ssd1"].create_partitions([Size(1, Unit.GibiByte)])
        TestRun.disks["ssd2"].create_partitions([Size(1, Unit.GibiByte)])
        TestRun.disks["hdd"].create_partitions([Size(1, Unit.GibiByte)])
        ssd1_dev = TestRun.disks["ssd1"].partitions[0]
        ssd2_dev = TestRun.disks["ssd2"].partitions[0]
        hdd_dev = TestRun.disks["hdd"].partitions[0]

    with TestRun.step("Start cache and add core."):
        cache_dev = ssd1_dev if trim_support_cache_core[0] else hdd_dev
        core_dev = ssd2_dev if trim_support_cache_core[1] else hdd_dev

        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        cache.set_cleaning_policy(cleaning_policy)
        core = cache.add_core(core_dev)

    with TestRun.step("Make filesystem and mount it with discard option."):
        core.create_filesystem(filesystem)
        core.mount(mount_point, ["discard"])

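    # Fill ~90% of the core device with a random file so that removing it later
    # generates a substantial batch of discards and a measurable occupancy drop.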
with TestRun.step("Create random file."):
132+
test_file = fs_utils.create_random_test_file(os.path.join(mount_point, "test_file"),
133+
core_dev.size * 0.9)
134+
occupancy_before = core.get_occupancy()
135+
TestRun.LOGGER.info(str(core.get_statistics()))
136+
137+
with TestRun.step("Start blktrace monitoring on all devices."):
138+
blktraces = start_monitoring(core_dev, cache_dev, core)
139+
140+
with TestRun.step("Remove file."):
141+
os_utils.sync()
142+
os_utils.drop_caches()
143+
test_file.remove()
144+
145+
with TestRun.step(
146+
"Ensure that discards were detected by blktrace on proper devices."):
147+
discard_expected = {"core": trim_support_cache_core[1], "cache": False, "cas": True}
148+
stop_monitoring_and_check_discards(blktraces, discard_expected)
149+
150+
with TestRun.step("Ensure occupancy reduced."):
151+
occupancy_after = core.get_occupancy()
152+
TestRun.LOGGER.info(str(core.get_statistics()))
153+
154+
if occupancy_after >= occupancy_before:
155+
TestRun.LOGGER.error("Occupancy on core after removing test file greater than before.")
156+
else:
157+
TestRun.LOGGER.info("Occupancy on core after removing test file smaller than before.")
158+
159+
with TestRun.step("Check CAS sysfs properties values."):
160+
check_sysfs_properties(cache, cache_dev, core, core_dev.parent_device,
161+
core_supporting_discards=trim_support_cache_core[1])
162+
163+
164+
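# When the core device supports discards, the CAS exported object is expected to
# inherit the core's discard limits; otherwise CAS advertises its own limits
# (discard_max_bytes equal to the cache device size, granularity equal to the
# cache line size), as checked below.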
def check_sysfs_properties(cache, cache_dev, core, core_disk, core_supporting_discards):
    expected_discard_max_bytes = int(core_disk.get_discard_max_bytes()) \
        if core_supporting_discards else int(cache_dev.size.get_value())
    cas_discard_max_bytes = int(core.get_discard_max_bytes())
    compare_properties(cas_discard_max_bytes, expected_discard_max_bytes, "discard_max_bytes")

    expected_discard_granularity = int(core_disk.get_discard_granularity()) \
        if core_supporting_discards else int(cache.get_cache_line_size())
    cas_discard_granularity = int(core.get_discard_granularity())
    compare_properties(
        cas_discard_granularity, expected_discard_granularity, "discard_granularity")

    cas_discard_zeroes_data = int(core.get_discard_zeroes_data())
    if cas_discard_zeroes_data == 0:
        TestRun.LOGGER.info("CAS discard_zeroes_data value equals 0 as expected.")
    else:
        TestRun.LOGGER.error(f"CAS discard_zeroes_data value equals {cas_discard_zeroes_data}. "
                             "Expected value for this property is 0.")


def compare_properties(value, expected_value, property_name):
    if expected_value == value:
        TestRun.LOGGER.info(f"CAS {property_name} value is correct.")
        return
    TestRun.LOGGER.error(f"CAS property {property_name} value equals {value} and differs "
                         f"from expected value: {expected_value}.")


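# Settle outstanding I/O before stopping the traces so queued discard requests
# have time to reach the traced devices and show up in the blktrace output.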
def stop_monitoring_and_check_discards(blktraces, discard_support):
    time.sleep(10)
    os_utils.sync()
    os_utils.drop_caches()
    time.sleep(2)

    discard_flag = RwbsKind.D  # Discard
    for key in blktraces.keys():
        output = blktraces[key].stop_monitoring()
        discard_messages = [h for h in output if discard_flag in h.rwbs]
        check_discards(len(discard_messages), blktraces[key].device, discard_support[key])


def check_discards(discards_count, device, discards_expected):
    if discards_expected:
        if discards_count > 0:
            TestRun.LOGGER.info(
                f"{discards_count} TRIM instructions generated for {device.system_path}")
        else:
            TestRun.LOGGER.error(f"No TRIM instructions found in requests to {device.system_path}")
    else:
        if discards_count > 0:
            TestRun.LOGGER.error(
                f"{discards_count} TRIM instructions generated for {device.system_path}")
        else:
            TestRun.LOGGER.info(f"No TRIM instructions found in requests to {device.system_path}")


def start_monitoring(core_dev, cache_dev, cas_dev):
    blktrace_core_dev = BlkTrace(core_dev, BlkTraceMask.discard)
    blktrace_cache_dev = BlkTrace(cache_dev, BlkTraceMask.discard)
    blktrace_cas = BlkTrace(cas_dev, BlkTraceMask.discard)

    blktrace_core_dev.start_monitoring()
    blktrace_cache_dev.start_monitoring()
    blktrace_cas.start_monitoring()

    return {"core": blktrace_core_dev, "cache": blktrace_cache_dev, "cas": blktrace_cas}


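# Helper building an fio job that writes a verifiable pattern over the whole
# target device using direct I/O.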
def write_pattern(device):
    return (Fio().create_command()
            .io_engine(IoEngine.libaio)
            .read_write(ReadWrite.write)
            .target(device)
            .direct()
            .verification_with_pattern()
            )


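# Open CAS logs its metadata layout to dmesg at cache start; the total metadata
# size is derived from the "Hash offset" and "Hash size" entries (the hash
# section apparently being the last one), rounded up to 128 KiB alignment.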
def get_metadata_size_from_dmesg():
    dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
    for s in dmesg_out.split("\n"):
        if "Hash offset" in s:
            offset = re.search("[0-9]* kiB", s).group()
            offset = Size(int(re.search("[0-9]*", offset).group()), Unit.KibiByte)
        if "Hash size" in s:
            size = re.search("[0-9]* kiB", s).group()
            size = Size(int(re.search("[0-9]*", size).group()), Unit.KibiByte)

    # Metadata is 128 KiB aligned
    return (offset + size).align_up(128 * Unit.KibiByte.value)

Comments (0)