66from ConfigValidator .Config .Models .OperationType import OperationType
77from ProgressManager .Output .OutputProcedure import OutputProcedure as output
88
9- from typing import Dict , List , Any , Optional
9+ from typing import Dict , Any , Optional
1010from pathlib import Path
1111from os .path import dirname , realpath
1212
13- import os
14- import signal
15- import pandas as pd
1613import time
1714import subprocess
1815import shlex
1916
17+ from Plugins .Profilers .PicoCM3 import PicoCM3 , CM3DataTypes , CM3Channels
18+
19+ # TODO
20+ # Finish parsing / averaging the values to place in the results table
21+ # Finish the documentation in 2 places
22+ # Test the implementation
23+ Write actual test code to test the implementation
24+
2025class RunnerConfig :
2126 ROOT_DIR = Path (dirname (realpath (__file__ )))
2227
2328 # ================================ USER SPECIFIC CONFIG ================================
2429 """The name of the experiment."""
25- name : str = "new_picocm3_experiment "
30+ name : str = "new_runner_experiment "
2631
2732 """The path in which Experiment Runner will create a folder with the name `self.name`, in order to store the
2833 results from this experiment. (Path does not need to exist - it will be created if necessary.)
@@ -52,25 +57,34 @@ def __init__(self):
5257 (RunnerEvents .POPULATE_RUN_DATA , self .populate_run_data ),
5358 (RunnerEvents .AFTER_EXPERIMENT , self .after_experiment )
5459 ])
60+
61+ self .latest_measurement = None
5562 self .run_table_model = None # Initialized later
5663 output .console_log ("Custom config loaded" )
5764
5865 def create_run_table_model (self ) -> RunTableModel :
5966 """Create and return the run_table model here. A run_table is a List (rows) of tuples (columns),
6067 representing each run performed"""
61- sampling_factor = FactorModel ("sampling" , [10 , 50 , 100 , 200 , 500 , 1000 ])
68+ workers_factor = FactorModel ("num_workers" , [1 , 2 , 3 , 4 ])
69+ write_factor = FactorModel ("write_size" , [256 , 1024 , 2048 , 4096 ])
70+
6271 self .run_table_model = RunTableModel (
63- factors = [sampling_factor ],
64- data_columns = ['dram_energy' , 'package_energy' ,
65- 'pp0_energy' , 'pp1_energy' ]
72+ factors = [workers_factor , write_factor ],
73+ data_columns = ['timestamp' , 'channel_1(A)' , 'channel_2(off)' , 'channel_3(off)' ]) # Channel 1 is in Amps
6674
67- )
6875 return self .run_table_model
6976
7077 def before_experiment (self ) -> None :
7178 """Perform any activity required before starting the experiment here
7279 Invoked only once during the lifetime of the program."""
73- pass
80+
81+ # Setup the picolog cm3 here (the parameters passed are also the default)
82+ self .meter = PicoCM3 (sample_frequency = 1000 , # Sample the CM3 every second
83+ mains_setting = 0 , # Account for 50hz mains frequency
84+ # Which channels are enabled in what mode
85+ channel_settings = { CM3Channels .PLCM3_CHANNEL_1 : CM3DataTypes .PLCM3_1_MILLIVOLT ,
86+ CM3Channels .PLCM3_CHANNEL_2 : CM3DataTypes .PLCM3_OFF ,
87+ CM3Channels .PLCM3_CHANNEL_3 : CM3DataTypes .PLCM3_OFF })
7488
7589 def before_run (self ) -> None :
7690 """Perform any activity required before starting a run.
@@ -81,34 +95,43 @@ def start_run(self, context: RunnerContext) -> None:
8195 """Perform any activity required for starting the run here.
8296 For example, starting the target system to measure.
8397 Activities after starting the run should also be performed here."""
84- pass
98+
99+ num_workers = context .run_variation ['num_workers' ]
100+ write_size = context .run_variation ['write_size' ]
101+
102+ # Start stress-ng
103+ stress_cmd = f"sudo stress-ng \
104+ --hdd { num_workers } \
105+ --hdd-write-size { write_size } \
106+ --hdd-ops 1000000 \
107+ --hdd-dev /dev/sda1 \
108+ --timeout 60s \
109+ --metrics-brief"
110+
111+ stress_log = open (f'{ context .run_dir } /stress-ng-log.log' , 'w' )
112+ self .stress_ng = subprocess .Popen (shlex .split (stress_cmd ), stdout = stress_log )
85113
86114 def start_measurement (self , context : RunnerContext ) -> None :
87115 """Perform any activity required for starting measurements."""
88- sampling_interval = context .run_variation ['sampling' ]
89-
90- profiler_cmd = f'sudo energibridge \
91- --interval { sampling_interval } \
92- --max-execution 20 \
93- --output { context .run_dir / "energibridge.csv" } \
94- --summary \
95- python3 examples/energibridge-profiling/primer.py'
96-
97- #time.sleep(1) # allow the process to run a little before measuring
98- energibridge_log = open (f'{ context .run_dir } /energibridge.log' , 'w' )
99- self .profiler = subprocess .Popen (shlex .split (profiler_cmd ), stdout = energibridge_log )
100-
116+
117+ num_workers = context .run_variation ['num_workers' ]
118+ write_size = context .run_variation ['write_size' ]
119+
120+ # Start the picologs measurements here, create a unique log file for each
121+ self .latest_log = str (context .run_dir .resolve () / f'pico_run_{ num_workers } _{ write_size } .log' )
122+ self .latest_measurement = self .meter .log (lambda : self .stress_ng .poll () != None , self .latest_log )
123+
101124 def interact (self , context : RunnerContext ) -> None :
102125 """Perform any interaction with the running target system here, or block here until the target finishes."""
103126
104- # No interaction. We just run it for XX seconds.
105- # Another example would be to wait for the target to finish, e.g. via `self.target.wait()`
106- output .console_log ("Running program for 20 seconds" )
107- time .sleep (20 )
127+ # Wait the maximum timeout for stress-ng to finish or time.sleep(60)
128+ self .stress_ng .wait ()
108129
109130 def stop_measurement (self , context : RunnerContext ) -> None :
110131 """Perform any activity here required for stopping measurements."""
111- self .profiler .wait ()
132+
133+ # Wait for stress-ng to finish
134+ self .stress_ng .wait ()
112135
113136 def stop_run (self , context : RunnerContext ) -> None :
114137 """Perform any activity here required for stopping the run.
@@ -120,14 +143,29 @@ def populate_run_data(self, context: RunnerContext) -> Optional[Dict[str, Any]]:
120143 You can also store the raw measurement data under `context.run_dir`
121144 Returns a dictionary with keys `self.run_table_model.data_columns` and their values populated"""
122145
123- # energibridge.csv - Power consumption of the whole system
124- df = pd .read_csv (context .run_dir / f"energibridge.csv" )
125- run_data = {
126- 'dram_energy' : round (df ['DRAM_ENERGY (J)' ].sum (), 3 ),
127- 'package_energy' : round (df ['PACKAGE_ENERGY (J)' ].sum (), 3 ),
128- 'pp0_energy' : round (df ['PP0_ENERGY (J)' ].sum (), 3 ),
129- 'pp1_energy' : round (df ['PP1_ENERGY (J)' ].sum (), 3 ),
130- }
146+ run_data = {k : [] for k in self .run_table_model .data_columns }
147+
148+ # Pass data through variables
149+ if self .latest_measurement != {}:
150+ for k , v in self .latest_measurement .items ():
151+ run_data ['timestamp' ].append (k )
152+ run_data ['channel_1(A)' ].append (v [0 ][0 ])
153+ run_data ['channel_2(off)' ].append (v [1 ][0 ])
154+ run_data ['channel_3(off)' ].append (v [2 ][0 ])
155+
156+ # Or through a log file
157+ else :
158+ with open (self .latest_log ) as f :
159+ lines = f .readlines ()
160+
161+ for line in lines :
162+ channel_vals = line .split ("," )
163+
164+ run_data ['timestamp' ].append (channel_vals [0 ])
165+ run_data ['channel_1(A)' ].append (channel_vals [1 ].split (" " )[0 ])
166+ run_data ['channel_2(off)' ].append (channel_vals [2 ].split (" " )[0 ])
167+ run_data ['channel_3(off)' ].append (channel_vals [3 ].split (" " )[0 ])
168+
131169 return run_data
132170
133171 def after_experiment (self ) -> None :
0 commit comments