@@ -145,12 +145,16 @@ def mesh_agent(hosts, test_config, log_path):
         mesh_ip = test_config.get("mesh_ip", None)

         if not mesh_ip:
-            logger.error(f"Host '{mesh_agent_name}' not found in topology.yaml and no mesh_ip provided.")
+            logger.error(
+                f"Host '{mesh_agent_name}' not found in topology.yaml and no mesh_ip provided."
+            )
             raise RuntimeError(
                 f"No mesh-agent name '{mesh_agent_name}' found in hosts and no mesh_ip provided in test_config."
             )
         else:
-            logger.info(f"Assumed that mesh agent is running, getting IP from topology config: {mesh_ip}")
+            logger.info(
+                f"Assumed that mesh agent is running, getting IP from topology config: {mesh_ip}"
+            )
             mesh_agent.external = True
             mesh_agent.mesh_ip = mesh_ip
             mesh_agent.p = test_config.get("mesh_port", mesh_agent.p)
@@ -223,7 +227,9 @@ def media_config(hosts: dict) -> None:
             if host.topology.extra_info.media_proxy.get("st2110", False):
                 pf_addr = host.network_interfaces[if_idx].pci_address.lspci
                 vfs = nicctl.vfio_list(pf_addr)
-                host.st2110_dev = host.topology.extra_info.media_proxy.get("st2110_dev", None)
+                host.st2110_dev = host.topology.extra_info.media_proxy.get(
+                    "st2110_dev", None
+                )
                 if not host.st2110_dev and not vfs:
                     nicctl.create_vfs(pf_addr)
                     vfs = nicctl.vfio_list(pf_addr)
@@ -238,25 +244,44 @@ def media_config(hosts: dict) -> None:
238244 f"Still no VFs on interface { host .network_interfaces [if_idx ].pci_address_lspci } even after creating VFs!"
239245 )
240246 host .vfs = vfs
241- host .st2110_ip = host .topology .extra_info .media_proxy .get ("st2110_ip" , f"192.168.0.{ last_oct } " )
247+ host .st2110_ip = host .topology .extra_info .media_proxy .get (
248+ "st2110_ip" , f"192.168.0.{ last_oct } "
249+ )
242250 if_idx += 1
243251 if host .topology .extra_info .media_proxy .get ("rdma" , False ):
244- if int (host .network_interfaces [if_idx ].virtualization .get_current_vfs ()) > 0 :
245- nicctl .disable_vf (str (host .network_interfaces [if_idx ].pci_address .lspci ))
252+ if (
253+ int (
254+ host .network_interfaces [if_idx ].virtualization .get_current_vfs ()
255+ )
256+ > 0
257+ ):
258+ nicctl .disable_vf (
259+ str (host .network_interfaces [if_idx ].pci_address .lspci )
260+ )
246261 net_adap_ips = host .network_interfaces [if_idx ].ip .get_ips ().v4
247262 rdma_ip = host .topology .extra_info .media_proxy .get ("rdma_ip" , False )
248263 if rdma_ip :
249- rdma_ip = IPv4Interface (f"{ rdma_ip } " if "/" in rdma_ip else f"{ rdma_ip } /24" )
264+ rdma_ip = IPv4Interface (
265+ f"{ rdma_ip } " if "/" in rdma_ip else f"{ rdma_ip } /24"
266+ )
250267 elif net_adap_ips and not rdma_ip :
251268 rdma_ip = net_adap_ips [0 ]
252269 if not rdma_ip or (rdma_ip not in net_adap_ips ):
253- rdma_ip = IPv4Interface (f"192.168.1.{ last_oct } /24" ) if not rdma_ip else rdma_ip
254- logger .info (f"IP { rdma_ip } not found on RDMA network interface, setting: { rdma_ip } " )
270+ rdma_ip = (
271+ IPv4Interface (f"192.168.1.{ last_oct } /24" )
272+ if not rdma_ip
273+ else rdma_ip
274+ )
275+ logger .info (
276+ f"IP { rdma_ip } not found on RDMA network interface, setting: { rdma_ip } "
277+ )
255278 host .network_interfaces [if_idx ].ip .add_ip (rdma_ip )
256279 host .rdma_ip = str (rdma_ip .ip )
257280 logger .info (f"VFs on { host .name } are: { host .vfs } " )
258281 except IndexError :
259- raise IndexError (f"Not enough network adapters available for tests! Expected: { if_idx + 1 } " )
282+ raise IndexError (
283+ f"Not enough network adapters available for tests! Expected: { if_idx + 1 } "
284+ )
260285 except AttributeError :
261286 logger .warning (
262287 f"Extra info media proxy in topology config for { host .name } is not set, skipping media config setup for this host."
@@ -282,10 +307,16 @@ def cleanup_processes(hosts: dict) -> None:
             try:
                 connection = host.connection
                 # connection.enable_sudo()
-                connection.execute_command(f"pgrep -f '{pattern}'", stderr_to_stdout=True)
-                connection.execute_command(f"pkill -9 -f '{pattern}'", stderr_to_stdout=True)
+                connection.execute_command(
+                    f"pgrep -f '{pattern}'", stderr_to_stdout=True
+                )
+                connection.execute_command(
+                    f"pkill -9 -f '{pattern}'", stderr_to_stdout=True
+                )
             except Exception as e:
-                logger.warning(f"Failed to check/kill processes matching {pattern} on {host.name}: {e}")
+                logger.warning(
+                    f"Failed to check/kill processes matching {pattern} on {host.name}: {e}"
+                )
     logger.info("Cleanup of processes completed.")


@@ -311,15 +342,19 @@ def check_iommu(hosts: dict[str, Host]) -> None:
     iommu_not_enabled_hosts = []
     for host in hosts.values():
         try:
-            output = host.connection.execute_command("ls -1 /sys/kernel/iommu_groups | wc -l", shell=True, timeout=10)
+            output = host.connection.execute_command(
+                "ls -1 /sys/kernel/iommu_groups | wc -l", shell=True, timeout=10
+            )
             if int(output.stdout.strip()) == 0:
                 logger.error(f"IOMMU is not enabled on host {host.name}.")
                 iommu_not_enabled_hosts.append(host.name)
         except Exception as e:
             logger.exception(f"Failed to check IOMMU status on host {host.name}.")
             iommu_not_enabled_hosts.append(host.name)
     if iommu_not_enabled_hosts:
-        pytest.exit(f"IOMMU is not enabled on hosts: {', '.join(iommu_not_enabled_hosts)}. Aborting test session.")
+        pytest.exit(
+            f"IOMMU is not enabled on hosts: {', '.join(iommu_not_enabled_hosts)}. Aborting test session."
+        )
     else:
         logger.info("IOMMU is enabled on all hosts.")

@@ -332,14 +367,20 @@ def enable_hugepages(hosts: dict[str, Host]) -> None:
     for host in hosts.values():
         if not _check_hugepages(host):
             try:
-                host.connection.execute_command("sudo sysctl -w vm.nr_hugepages=2048", shell=True, timeout=10)
+                host.connection.execute_command(
+                    "sudo sysctl -w vm.nr_hugepages=2048", shell=True, timeout=10
+                )
                 logger.info(f"Hugepages enabled on host {host.name}.")
             except (RemoteProcessTimeoutExpired, ConnectionCalledProcessError):
                 logger.exception(f"Failed to enable hugepages on host {host.name}.")
-                pytest.exit(f"Failed to enable hugepages on host {host.name}. Aborting test session.")
+                pytest.exit(
+                    f"Failed to enable hugepages on host {host.name}. Aborting test session."
+                )
             if not _check_hugepages(host):
                 logger.error(f"Hugepages could not be enabled on host {host.name}.")
-                pytest.exit(f"Hugepages could not be enabled on host {host.name}. Aborting test session.")
+                pytest.exit(
+                    f"Hugepages could not be enabled on host {host.name}. Aborting test session."
+                )
         else:
             logger.info(f"Hugepages are already enabled on host {host.name}.")

@@ -349,7 +390,9 @@ def _check_hugepages(host: Host) -> bool:
     Check if hugepages are enabled on the host.
     """
     try:
-        output = host.connection.execute_command("cat /proc/sys/vm/nr_hugepages", shell=True, timeout=10)
+        output = host.connection.execute_command(
+            "cat /proc/sys/vm/nr_hugepages", shell=True, timeout=10
+        )
         return int(output.stdout.strip()) > 0
     except (RemoteProcessTimeoutExpired, ConnectionCalledProcessError):
         logger.exception(f"Failed to check hugepages status on host {host.name}.")
@@ -395,7 +438,9 @@ def log_case(request, caplog: pytest.LogCaptureFixture):
     os.makedirs(os.path.join(LOG_FOLDER, "latest", case_folder), exist_ok=True)
     logfile = os.path.join(LOG_FOLDER, "latest", f"{case_id}.log")
     fh = logging.FileHandler(logfile)
-    formatter = request.session.config.pluginmanager.get_plugin("logging-plugin").formatter
+    formatter = request.session.config.pluginmanager.get_plugin(
+        "logging-plugin"
+    ).formatter
     format = AmberLogFormatter(formatter)
     fh.setFormatter(format)
     fh.setLevel(logging.DEBUG)