 CONFIG_NAME_ABS = os.path.join(defs.CFG_LOCATION, CONFIG_NAME_REL)
 CONFIG_DICT = json.load(open(CONFIG_NAME_ABS, encoding="utf-8"))
 
-SERVER_STARTUP_TIME = CONFIG_DICT["server_startup_time"]
+# Number of seconds to wait for the iperf3 server to start
+SERVER_STARTUP_TIME_SEC = 2
 IPERF3 = "iperf3-vsock"
 THROUGHPUT = "throughput"
 DURATION = "duration"
@@ -44,6 +45,18 @@
 DURATION_UNIT = "seconds"
 CPU_UTILIZATION_UNIT = "percentage"
 
+# How many clients/servers should be spawned per vcpu
+LOAD_FACTOR = 1
+
+# Time (in seconds) for which iperf "warms up"
+WARMUP_SEC = 3
+
+# Time (in seconds) for which iperf runs after warmup is done
+RUNTIME_SEC = 20
+
+# Dictionary mapping modes (guest-to-host, host-to-guest, bidirectional) to arguments passed to the iperf3 clients spawned
+MODE_MAP = {"bd": ["", "-R"], "g2h": [""], "h2g": ["-R"]}
+
 
 # pylint: disable=R0903
 class VsockThroughputBaselineProvider(BaselineProvider):
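
Aside on MODE_MAP (commentary, not part of the patch): each entry lists the per-client argument appended to the iperf3 command line. "-R" is iperf3's reverse flag, so that client measures host-to-guest traffic, the empty string keeps the default guest-to-host direction, and "bd" supplies one of each. Assuming produce_iperf_output (unchanged by this diff) hands the entries out round-robin across the LOAD_FACTOR * vcpus client slots, a rough sketch of the assignment looks like this:

    # Illustration only; mirrors the assumed round-robin assignment done by
    # produce_iperf_output, which is outside this diff.
    MODE_MAP = {"bd": ["", "-R"], "g2h": [""], "h2g": ["-R"]}
    LOAD_FACTOR = 1
    vcpus = 2
    modes = MODE_MAP["bd"]
    for client_idx in range(LOAD_FACTOR * vcpus):
        arg = modes[client_idx % len(modes)]
        print(client_idx, repr(arg))  # 0 '' (guest->host), 1 '-R' (host->guest)

This is also why the bidirectional mode needs at least two vCPUs: one pinned iperf3 client per direction.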
@@ -100,7 +113,7 @@ def produce_iperf_output(
         current_avail_cpu += 1
 
     # Wait for iperf3 servers to start.
-    time.sleep(SERVER_STARTUP_TIME)
+    time.sleep(SERVER_STARTUP_TIME_SEC)
 
     # Start `vcpus` iperf3 clients. We can not use iperf3 parallel streams
     # due to non deterministic results and lack of scaling.
@@ -131,7 +144,7 @@ def spawn_iperf_client(conn, client_idx, mode):
         cpu_load_future = executor.submit(
             get_cpu_percent,
             basevm.jailer_clone_pid,
-            runtime - SERVER_STARTUP_TIME,
+            runtime - SERVER_STARTUP_TIME_SEC,
             omit,
         )
 
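
Aside (not part of the patch): with the new constants, the sampling window handed to get_cpu_percent works out to runtime - SERVER_STARTUP_TIME_SEC = 20 - 2 = 18 seconds, since the producer now receives runtime=RUNTIME_SEC; omit (WARMUP_SEC) is still forwarded separately. This assumes get_cpu_percent treats its second argument as a duration in seconds, which is what the subtraction suggests.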
@@ -203,64 +216,72 @@ def consume_iperf_output(cons, result):
     cons.consume_stat("Avg", CPU_UTILIZATION_VCPUS_TOTAL, cpu_util_guest)
 
 
-def pipes(basevm, current_avail_cpu, env_id):
+def pipe(basevm, current_avail_cpu, env_id, mode, payload_length):
     """Producer/Consumer pipes generator."""
-    for mode in CONFIG_DICT["modes"]:
-        # We run bi-directional tests only on uVM with more than 2 vCPus
-        # because we need to pin one iperf3/direction per vCPU, and since we
-        # have two directions, we need at least two vCPUs.
-        if mode == "bd" and basevm.vcpus_count < 2:
-            continue
-
-        for protocol in CONFIG_DICT["protocols"]:
-            for payload_length in protocol["payload_length"]:
-                iperf_guest_cmd_builder = (
-                    CmdBuilder(IPERF3)
-                    .with_arg("--vsock")
-                    .with_arg("-c", 2)
-                    .with_arg("--json")
-                    .with_arg("--omit", protocol["omit"])
-                    .with_arg("--time", CONFIG_DICT["time"])
-                )
-
-                if payload_length != "DEFAULT":
-                    iperf_guest_cmd_builder = iperf_guest_cmd_builder.with_arg(
-                        "--len", f"{payload_length}"
-                    )
-
-                iperf3_id = f"vsock-p{payload_length}-{mode}"
-
-                cons = consumer.LambdaConsumer(
-                    metadata_provider=DictMetadataProvider(
-                        CONFIG_DICT["measurements"],
-                        VsockThroughputBaselineProvider(env_id, iperf3_id),
-                    ),
-                    func=consume_iperf_output,
-                )
+    iperf_guest_cmd_builder = (
+        CmdBuilder(IPERF3)
+        .with_arg("--vsock")
+        .with_arg("-c", 2)
+        .with_arg("--json")
+        .with_arg("--omit", WARMUP_SEC)
+        .with_arg("--time", RUNTIME_SEC)
+    )
+
+    if payload_length != "DEFAULT":
+        iperf_guest_cmd_builder = iperf_guest_cmd_builder.with_arg(
+            "--len", f"{payload_length}"
+        )
 
-                prod_kwargs = {
-                    "guest_cmd_builder": iperf_guest_cmd_builder,
-                    "basevm": basevm,
-                    "current_avail_cpu": current_avail_cpu,
-                    "runtime": CONFIG_DICT["time"],
-                    "omit": protocol["omit"],
-                    "load_factor": CONFIG_DICT["load_factor"],
-                    "modes": CONFIG_DICT["modes"][mode],
-                }
-                prod = producer.LambdaProducer(produce_iperf_output, prod_kwargs)
-                yield cons, prod, f"{env_id}/{iperf3_id}"
+    iperf3_id = f"vsock-p{payload_length}-{mode}"
+
+    cons = consumer.LambdaConsumer(
+        metadata_provider=DictMetadataProvider(
+            CONFIG_DICT["measurements"],
+            VsockThroughputBaselineProvider(env_id, iperf3_id),
+        ),
+        func=consume_iperf_output,
+    )
+
+    prod_kwargs = {
+        "guest_cmd_builder": iperf_guest_cmd_builder,
+        "basevm": basevm,
+        "current_avail_cpu": current_avail_cpu,
+        "runtime": RUNTIME_SEC,
+        "omit": WARMUP_SEC,
+        "load_factor": LOAD_FACTOR,
+        "modes": MODE_MAP[mode],
+    }
+    prod = producer.LambdaProducer(produce_iperf_output, prod_kwargs)
+    return cons, prod, f"{env_id}/{iperf3_id}"
 
 
 @pytest.mark.nonci
 @pytest.mark.timeout(1200)
-@pytest.mark.parametrize("vcpus", [1, 2])
+@pytest.mark.parametrize("vcpus", [1, 2], ids=["1vcpu", "2vcpu"])
+@pytest.mark.parametrize(
+    "payload_length", ["DEFAULT", "1024K"], ids=["pDEFAULT", "p1024K"]
+)
+@pytest.mark.parametrize("mode", ["g2h", "h2g", "bd"])
 def test_vsock_throughput(
-    microvm_factory, network_config, guest_kernel, rootfs, vcpus, st_core
+    microvm_factory,
+    network_config,
+    guest_kernel,
+    rootfs,
+    vcpus,
+    payload_length,
+    mode,
+    st_core,
 ):
     """
     Test vsock throughput for multiple vm configurations.
     """
 
+    # We run bi-directional tests only on uVMs with at least 2 vCPUs
+    # because we need to pin one iperf3/direction per vCPU, and since we
+    # have two directions, we need at least two vCPUs.
+    if mode == "bd" and vcpus < 2:
+        pytest.skip("bidirectional test only done with at least 2 vcpus")
+
     mem_size_mib = 1024
     vm = microvm_factory.build(guest_kernel, rootfs, monitor_memory=False)
     vm.spawn()
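
Aside (not part of the patch): the three parametrize decorators above expand the test into 3 modes x 2 payload lengths x 2 vCPU counts = 12 parameter combinations per guest kernel/rootfs pair. The two "bd" combinations on 1 vCPU are skipped at runtime, so 10 combinations actually measure throughput, replacing the nested loops that the old pipes() generator ran internally.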
@@ -287,12 +308,14 @@ def test_vsock_throughput(
         current_avail_cpu += 1
         assert vm.pin_vcpu(i, current_avail_cpu), f"Failed to pin fc_vcpu {i} thread."
 
-    for cons, prod, tag in pipes(
+    cons, prod, tag = pipe(
         vm,
         current_avail_cpu + 1,
         f"{guest_kernel.name()}/{rootfs.name()}/{guest_config}",
-    ):
-        st_core.add_pipe(prod, cons, tag)
+        mode,
+        payload_length,
+    )
+    st_core.add_pipe(prod, cons, tag)
 
     # Start running the commands on guest, gather results and verify pass
     # criteria.
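
Putting the pieces together (illustration, not part of the patch): for payload_length="1024K" and mode="h2g", the guest command produced by the builder in pipe() should come out roughly as follows, assuming CmdBuilder joins the binary name and its with_arg() pairs in order and that the producer appends the single MODE_MAP["h2g"] entry to the client it spawns:

    iperf3-vsock --vsock -c 2 --json --omit 3 --time 20 --len 1024K -R

Here --omit 3 drops the WARMUP_SEC warm-up from the reported statistics, --time 20 bounds the measured run at RUNTIME_SEC, and -c 2 presumably targets CID 2, the well-known vsock CID of the host.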