diff --git a/docker/structure.sql b/docker/structure.sql
index f4ee4d2ae..411c0436a 100644
--- a/docker/structure.sql
+++ b/docker/structure.sql
@@ -322,6 +322,23 @@ CREATE TRIGGER categories_moddatetime
             EXECUTE PROCEDURE moddatetime (updated_at);

+CREATE TABLE micro_phases (
+    id SERIAL PRIMARY KEY,
+    run_id uuid NOT NULL REFERENCES runs(id) ON DELETE CASCADE ON UPDATE CASCADE,
+    name text NOT NULL,
+    start_time bigint NOT NULL,
+    end_time bigint NOT NULL,
+    created_at timestamp with time zone DEFAULT now(),
+    updated_at timestamp with time zone
+);
+
+CREATE INDEX "micro_phases_run_id" ON "micro_phases" USING HASH ("run_id");
+CREATE TRIGGER micro_phases_moddatetime
+    BEFORE UPDATE ON micro_phases
+    FOR EACH ROW
+    EXECUTE PROCEDURE moddatetime (updated_at);
+
+
 CREATE TABLE phase_stats (
     id SERIAL PRIMARY KEY,
     run_id uuid NOT NULL REFERENCES runs(id) ON DELETE CASCADE ON UPDATE CASCADE,
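The new `micro_phases` table mirrors the `phase_stats` layout: one row per sub-phase, microsecond timestamps, and the usual `moddatetime` trigger. The HASH index supports only equality comparisons on `run_id`, which fits the access pattern here, since micro phases are always fetched for exactly one run. As a sketch of how rows could be read back (the helper below is hypothetical and not part of this change; `DB().fetch_all` with `params` and `fetch_mode='dict'` does appear later in this diff):

```python
# Hypothetical read-back via the project's DB helper; illustrative only
from lib.db import DB

def fetch_micro_phases(run_id):
    # An equality lookup on run_id is exactly what the new HASH index accelerates
    return DB().fetch_all(
        "SELECT name, start_time, end_time FROM micro_phases WHERE run_id = %s ORDER BY start_time",
        params=(run_id,),
        fetch_mode='dict',
    )
```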
diff --git a/lib/notes.py b/lib/notes.py
index fa5ebb156..4cfc0dc2d 100644
--- a/lib/notes.py
+++ b/lib/notes.py
@@ -1,4 +1,4 @@
-from re import fullmatch
+import re

 from lib.db import DB

@@ -22,18 +22,15 @@ def save_to_db(self, run_id):
                 """,
                 params=(run_id, note['detail_name'], note['note'], int(note['timestamp']))
             )

+    def parse_and_add_notes(self, detail_name, data):
+        for match in re.findall(r'^(\d{16}) (.+)$', data, re.MULTILINE):
+            self.__notes.append({'note': match[1], 'detail_name': detail_name, 'timestamp': match[0]})

-    def parse_note(self, line):
-        if match := fullmatch(r'^(\d{16}) (.+)', line):
-            return int(match[1]), match[2]
-        return None
-
-    def add_note(self, note):
-        self.__notes.append(note)
+    def add_note(self, note, detail_name, timestamp):
+        self.__notes.append({'note': note, 'detail_name': detail_name, 'timestamp': timestamp})

 if __name__ == '__main__':
     import argparse
-    import time

     parser = argparse.ArgumentParser()
     parser.add_argument('run_id', help='Please supply a run_id to attribute the measurements to')
@@ -41,7 +38,5 @@ def add_note(self, note):
     args = parser.parse_args()  # script will exit if arguments not present

     notes = Notes()
-    notes.add_note({'note': 'This is my note',
-                    'timestamp': int(time.time_ns() / 1000),
-                    'detail_name': 'Arnes_ Container'})
+    notes.parse_and_add_notes('my container', '1234567890123456 My note')
    notes.save_to_db(args.run_id)
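The `parse_and_add_notes` regex anchors on a 16-digit microsecond timestamp at the start of each line; lines without that prefix are silently skipped. A standalone illustration of the same pattern (sketch, mirroring the diff):

```python
import re

data = "1234567890123456 My note\nnot a note line\n1234567890123460 Second note"

# Each match is a (timestamp, text) tuple; the middle line does not match
for match in re.findall(r'^(\d{16}) (.+)$', data, re.MULTILINE):
    print(match)
# ('1234567890123456', 'My note')
# ('1234567890123460', 'Second note')
```

Note that timestamps stay strings at this point; `save_to_db` still casts with `int(note['timestamp'])`, which is what the invalid-timestamp test at the end of this diff continues to exercise.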
diff --git a/lib/scenario_runner.py b/lib/scenario_runner.py
index d61cf689f..dcc0adadc 100644
--- a/lib/scenario_runner.py
+++ b/lib/scenario_runner.py
@@ -138,12 +138,23 @@ def __init__(self,
         ).get('sampling_rate', 0)

         del self._arguments['self'] # self is not needed and also cannot be serialized. We remove it
-
+        self._safe_post_processing_steps = (
+            ('end_measurement', {'skip_on_already_ended': True}),
+            ('patch_phases', {}),
+            ('read_container_logs', {}),
+            ('stop_metric_providers', {}),
+            ('read_and_cleanup_processes', {}),
+            ('store_phases', {}),
+            ('save_notes_runner', {}),
+            ('save_stdout_logs', {}),
+            ('save_warnings', {}),
+            ('process_phase_stats', {}),
+        )
+
         # transient variables that are created by the runner itself
         # these are accessed and processed on cleanup and then reset
         # They are __ as they should not be changed because this could break the state of the runner
-        self.__stdout_logs = OrderedDict()
+        self.__stdout_logs = []
         self.__containers = {}
         self.__networks = []
         self.__ps_to_kill = []
@@ -691,7 +702,7 @@ def build_docker_images(self):
             if 'build' in service:
                 context, dockerfile = self.get_build_info(service)
                 print(f"Building {service['image']}")
-                self.__notes_helper.add_note({'note': f"Building {service['image']}", 'detail_name': '[NOTES]', 'timestamp': int(time.time_ns() / 1_000)})
+                self.__notes_helper.add_note(note=f"Building {service['image']}", detail_name='[NOTES]', timestamp=int(time.time_ns() / 1_000))

                 # Make sure the context docker file exists and is not trying to escape some root. We don't need the returns
                 context_path = self.join_paths(self.__working_folder, context)
@@ -735,7 +746,7 @@ def build_docker_images(self):
             else:
                 print(f"Pulling {service['image']}")
-                self.__notes_helper.add_note({'note': f"Pulling {service['image']}", 'detail_name': '[NOTES]', 'timestamp': int(time.time_ns() / 1_000)})
+                self.__notes_helper.add_note(note=f"Pulling {service['image']}", detail_name='[NOTES]', timestamp=int(time.time_ns() / 1_000))

                 ps = subprocess.run(['docker', 'pull', service['image']], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='UTF-8', check=False)
                 if ps.returncode != 0:
@@ -1306,11 +1317,8 @@ def setup_services(self):
     def get_logs(self):
         return self.__stdout_logs

-    def add_to_log(self, container_name, message, cmd=''):
-        log_entry_name = f"{container_name}_{cmd}"
-        if log_entry_name not in self.__stdout_logs:
-            self.__stdout_logs[log_entry_name] = ''
-        self.__stdout_logs[log_entry_name] = '\n'.join((self.__stdout_logs[log_entry_name], message))
+    def add_to_log(self, identifier, message):
+        self.__stdout_logs.append(f"{identifier}\n{message}")

     def save_warnings(self):
         if not self._run_id or self._dev_no_save:
@@ -1349,7 +1357,7 @@ def start_metric_providers(self, allow_container=True, allow_other=True):
             metric_provider.start_profiling()

             if self._verbose_provider_boot:
-                self.__notes_helper.add_note({'note': message, 'detail_name': '[NOTES]', 'timestamp': int(time.time_ns() / 1_000)})
+                self.__notes_helper.add_note(note=message, detail_name='[NOTES]', timestamp=int(time.time_ns() / 1_000))
                 self.custom_sleep(10)

         print(TerminalColors.HEADER, '\nWaiting for Metric Providers to boot ...', TerminalColors.ENDC)
@@ -1385,7 +1393,7 @@ def start_phase(self, phase, transition = True):
             #print(TerminalColors.HEADER, '\nChecking if temperature is back to baseline ...', TerminalColors.ENDC)

         phase_time = int(time.time_ns() / 1_000)
-        self.__notes_helper.add_note({'note': f"Starting phase {phase}", 'detail_name': '[NOTES]', 'timestamp': phase_time})
+        self.__notes_helper.add_note(note=f"Starting phase {phase}", detail_name='[NOTES]', timestamp=phase_time)

         self.__phases[phase] = {'start': phase_time, 'name': phase}
@@ -1396,7 +1404,7 @@ def end_phase(self, phase):
         phase_time = int(time.time_ns() / 1_000)

         if self._phase_padding:
-            self.__notes_helper.add_note({'note': f"Ending phase {phase} [UNPADDED]", 'detail_name': '[NOTES]', 'timestamp': phase_time})
+            self.__notes_helper.add_note(note=f"Ending phase {phase} [UNPADDED]", detail_name='[NOTES]', timestamp=phase_time)
             phase_time += self._phase_padding_ms*1000 # value is in ms and we need to get to us
             time.sleep(self._phase_padding_ms/1000) # no custom sleep here as even with dev_no_sleeps we must ensure phases don't overlap
@@ -1407,16 +1415,16 @@ def end_phase(self, phase):
             for container_to_pause in self.__services_to_pause_phase[phase]:
                 info_text = f"Pausing {container_to_pause} after phase: {phase}."
                 print(info_text)
-                self.__notes_helper.add_note({'note': info_text, 'detail_name': '[NOTES]', 'timestamp': phase_time})
+                self.__notes_helper.add_note(note=info_text, detail_name='[NOTES]', timestamp=phase_time)

                 subprocess.run(['docker', 'pause', container_to_pause], check=True, stdout=subprocess.DEVNULL)

         self.__phases[phase]['end'] = phase_time

         if self._phase_padding:
-            self.__notes_helper.add_note({'note': f"Ending phase {phase} [PADDED]", 'detail_name': '[NOTES]', 'timestamp': phase_time})
+            self.__notes_helper.add_note(note=f"Ending phase {phase} [PADDED]", detail_name='[NOTES]', timestamp=phase_time)
         else:
-            self.__notes_helper.add_note({'note': f"Ending phase {phase} [UNPADDED]", 'detail_name': '[NOTES]', 'timestamp': phase_time})
+            self.__notes_helper.add_note(note=f"Ending phase {phase} [UNPADDED]", detail_name='[NOTES]', timestamp=phase_time)

     def run_flows(self):
         ps_to_kill_tmp = []
@@ -1438,7 +1446,7 @@ def run_flows(self):
                 self.check_total_runtime_exceeded()

                 if 'note' in cmd_obj:
-                    self.__notes_helper.add_note({'note': cmd_obj['note'], 'detail_name': flow['container'], 'timestamp': int(time.time_ns() / 1_000)})
+                    self.__notes_helper.add_note(note=cmd_obj['note'], detail_name=flow['container'], timestamp=int(time.time_ns() / 1_000))

                 if cmd_obj['type'] == 'console':
                     print(TerminalColors.HEADER, '\nConsole command', cmd_obj['command'], 'on container', flow['container'], TerminalColors.ENDC)
@@ -1485,22 +1493,15 @@ def run_flows(self):
                         print('Executing process synchronously.')
                         if self._measurement_flow_process_duration:
                             print(f"Alloting {self._measurement_flow_process_duration}s runtime ...")
-                            ps = subprocess.run(
-                                docker_exec_command,
-                                stderr=stderr_behaviour,
-                                stdout=stdout_behaviour,
-                                encoding='UTF-8',
-                                check=False, # cause it will be checked later and also ignore-errors checked
-                                timeout=self._measurement_flow_process_duration,
-                            )
-                        else:
-                            ps = subprocess.run(
-                                docker_exec_command,
-                                stderr=stderr_behaviour,
-                                stdout=stdout_behaviour,
-                                encoding='UTF-8',
-                                check=False, # cause it will be checked later and also ignore-errors checked
-                            )
+
+                        ps = subprocess.run(
+                            docker_exec_command,
+                            stderr=stderr_behaviour,
+                            stdout=stdout_behaviour,
+                            encoding='UTF-8',
+                            check=False, # cause it will be checked later and also ignore-errors checked
+                            timeout=self._measurement_flow_process_duration,
+                        )

                         ps_to_read_tmp.append({
                             'cmd': docker_exec_command,
@@ -1509,6 +1510,7 @@ def run_flows(self):
                             'read-notes-stdout': cmd_obj.get('read-notes-stdout', False),
                             'ignore-errors': cmd_obj.get('ignore-errors', False),
                             'read-sci-stdout': cmd_obj.get('read-sci-stdout', False),
+                            'sub-phase-expansion-pattern': cmd_obj.get('sub-phase-expansion-pattern', None),
                             'detail_name': flow['container'],
                             'detach': cmd_obj.get('detach', False),
                         })
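Context for the `subprocess.run` consolidation above: the two branches collapse into one because `timeout=None` disables the timeout entirely in the standard library, so when `_measurement_flow_process_duration` is unset the single call behaves exactly like the old `else` branch. A minimal illustration (assuming the attribute defaults to `None`; a numeric `0` would instead raise `subprocess.TimeoutExpired` almost immediately):

```python
import subprocess

duration = None  # stand-in for an unset _measurement_flow_process_duration

# timeout=None means "wait forever", identical to omitting the argument
ps = subprocess.run(['echo', 'hello'], capture_output=True, encoding='UTF-8',
                    check=False, timeout=duration)
print(ps.stdout)  # hello
```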
@@ -1637,22 +1639,32 @@ def read_and_cleanup_processes(self):
            stdout = ps['ps'].stdout
            stderr = ps['ps'].stderr

-            if stdout:
-                for line in stdout.splitlines():
-                    print('stdout from process:', ps['cmd'], line)
-                    self.add_to_log(ps['container_name'], f"stdout: {line}", ps['cmd'])
+            if stdout is not None:
+                print('stdout from process:', ps['cmd'], stdout)
+                self.add_to_log(f"{ps['container_name']} (ID: {id(ps['ps'])}; CMD: {ps['cmd']}); STDOUT", stdout)
+
+                if ps['read-notes-stdout']:
+                    self.__notes_helper.parse_and_add_notes(ps['detail_name'], stdout)
+
+                if ps['read-sci-stdout']:
+                    for match in re.findall(r'^GMT_SCI_R=(\d+)$', stdout, re.MULTILINE):
+                        self._sci['R'] += int(match)

-                    if ps['read-notes-stdout']:
-                        if note := self.__notes_helper.parse_note(line):
-                            self.__notes_helper.add_note({'note': note[1], 'detail_name': ps['detail_name'], 'timestamp': note[0]})
+                if ps.get('sub-phase-expansion-pattern', None):
+                    print('Pattern is', ps['sub-phase-expansion-pattern'])

-                    if ps['read-sci-stdout']:
-                        if match := re.findall(r'GMT_SCI_R=(\d+)', line):
-                            self._sci['R'] += int(match[0])
-            if stderr:
-                for line in stderr.splitlines():
-                    print('stderr from process:', ps['cmd'], line)
-                    self.add_to_log(ps['container_name'], f"stderr: {line}", ps['cmd'])
+                    last_timestamp = 0
+                    for match in re.findall(ps['sub-phase-expansion-pattern'], stdout, re.MULTILINE):
+                        DB().query("INSERT INTO micro_phases (run_id, name, start_time, end_time) VALUES (%s, %s, %s, %s)", (self._run_id, match[1], last_timestamp, match[0]))
+                        self.__phases[match[1]] = {'start': int(last_timestamp), 'name': match[1], 'end': int(match[0])}
+                        last_timestamp = match[0]
+                        print('Phase start is', match[0])
+                        print('Phase name is', match[1])
+                        print('Phase end is ', '????')
+
+            if stderr is not None:
+                print('stderr from process:', ps['cmd'], stderr)
+                self.add_to_log(f"{ps['container_name']} (ID: {id(ps['ps'])}; CMD: {ps['cmd']}); STDERR", stderr)

     def check_process_returncodes(self):
         print(TerminalColors.HEADER, '\nChecking process return codes', TerminalColors.ENDC)
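To make the micro-phase expansion concrete: each match of the scenario-supplied pattern yields an (end-timestamp, name) tuple, and `last_timestamp` chains one phase's end into the next phase's start, so the first phase starts at 0. A standalone trace of that chaining (the pattern and stdout below are hypothetical; the real pattern comes from the scenario's `sub-phase-expansion-pattern` key and is applied with `re.MULTILINE`, as in the diff):

```python
import re

stdout = "1234567890100000 step-one\n1234567890200000 step-two\n"
pattern = r'^(\d{16}) (.+)$'  # illustrative pattern with the same two groups

last_timestamp = 0
for end_time, name in re.findall(pattern, stdout, re.MULTILINE):
    print(name, 'spans', last_timestamp, '->', end_time)
    last_timestamp = end_time
# step-one spans 0 -> 1234567890100000
# step-two spans 1234567890100000 -> 1234567890200000
```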
@@ -1676,7 +1688,7 @@ def start_measurement(self):
         self.__start_measurement = int(time.time_ns() / 1_000)
         self.__start_measurement_seconds = time.time()

-        self.__notes_helper.add_note({'note': 'Start of measurement', 'detail_name': '[NOTES]', 'timestamp': self.__start_measurement})
+        self.__notes_helper.add_note(note='Start of measurement', detail_name='[NOTES]', timestamp=self.__start_measurement)

     def end_measurement(self, skip_on_already_ended=False):
         if self.__end_measurement:
@@ -1685,7 +1697,7 @@ def end_measurement(self, skip_on_already_ended=False):
             raise RuntimeError('end_measurement was requested although value as already set!')

         self.__end_measurement = int(time.time_ns() / 1_000)
-        self.__notes_helper.add_note({'note': 'End of measurement', 'detail_name': '[NOTES]', 'timestamp': self.__end_measurement})
+        self.__notes_helper.add_note(note='End of measurement', detail_name='[NOTES]', timestamp=self.__end_measurement)

         self.update_start_and_end_times()
@@ -1753,21 +1765,18 @@ def read_container_logs(self):
                 stderr=stderr_behaviour,
             )

-            if log.stdout:
-                self.add_to_log(container_id, f"stdout: {log.stdout}")
+            if log.stdout is not None:
+                self.add_to_log(f"{container_info['name']} (STDOUT)", log.stdout)

-                if container_info['read-notes-stdout'] or container_info['read-sci-stdout']:
-                    for line in log.stdout.splitlines():
-                        if container_info['read-notes-stdout']:
-                            if note := self.__notes_helper.parse_note(line):
-                                self.__notes_helper.add_note({'note': note[1], 'detail_name': container_info['name'], 'timestamp': note[0]})
+                if container_info['read-notes-stdout']:
+                    self.__notes_helper.parse_and_add_notes(container_info['name'], log.stdout)

-                        if container_info['read-sci-stdout']:
-                            if match := re.findall(r'GMT_SCI_R=(\d+)', line):
-                                self._sci['R'] += int(match[0])
+                if container_info['read-sci-stdout']:
+                    for match in re.findall(r'^GMT_SCI_R=(\d+)$', log.stdout, re.MULTILINE):
+                        self._sci['R'] += int(match)

-            if log.stderr:
-                self.add_to_log(container_id, f"stderr: {log.stderr}")
+            if log.stderr is not None:
+                self.add_to_log(f"{container_info['name']} (STDERR)", log.stderr)

     def save_stdout_logs(self):
         print(TerminalColors.HEADER, '\nSaving logs to DB', TerminalColors.ENDC)
@@ -1776,7 +1785,7 @@ def save_stdout_logs(self):
             print('Skipping savings logs to DB due to missing run id or --dev-no-save')
             return # Nothing to do, but also no hard error needed

-        logs_as_str = '\n\n'.join([f"{k}:{v}" for k,v in self.__stdout_logs.items()])
+        logs_as_str = '\n\n'.join(self.__stdout_logs)
         logs_as_str = logs_as_str.replace('\x00','')

         if logs_as_str:
             DB().query("""
@@ -1816,6 +1825,42 @@ def identify_invalid_run(self):
                 self.__warnings.append(invalid_message)
                 break # one is enough

+    def patch_phases(self):
+        if self.__phases:
+            last_phase_name, _ = next(reversed(self.__phases.items()))
+            if self.__phases[last_phase_name].get('end', None) is None:
+                self.__phases[last_phase_name]['end'] = int(time.time_ns() / 1_000)
+
+            # Also patch Runtime phase separately, which we need as this will only get set after all child runtime phases
+            if self.__phases.get('[RUNTIME]', None) is not None and self.__phases['[RUNTIME]'].get('end', None) is None:
+                self.__phases['[RUNTIME]']['end'] = int(time.time_ns() / 1_000)
+
+    def process_phase_stats(self):
+        if not self._run_id or self._dev_no_phase_stats or self._dev_no_save:
+            return
+
+        # After every run, even if it failed, we want to generate phase stats.
+        # They will not show the accurate data, but they are still needed to understand how
+        # much a failed run has accrued in total energy and carbon costs
+        print(TerminalColors.HEADER, '\nCalculating and storing phases data. This can take a couple of seconds ...', TerminalColors.ENDC)
+
+        # get all the metrics from the measurements table grouped by metric
+        # loop over them issuing separate queries to the DB
+        from tools.phase_stats import build_and_store_phase_stats # pylint: disable=import-outside-toplevel
+        build_and_store_phase_stats(self._run_id, self._sci)
+
+    def post_process(self, index):
+        try:
+            for step in self._safe_post_processing_steps[index:]:
+                method_name, args = step
+                getattr(self, method_name)(**args)
+                index += 1
+        except BaseException as exc:
+            self.add_to_log(f"{exc.__class__.__name__} (ID: {id(exc)})", str(exc))
+            self.set_run_failed()
+            self.post_process(index+1)
+            raise exc
+
     def cleanup(self, continue_measurement=False):
         #https://github.com/green-coding-solutions/green-metrics-tool/issues/97
         print(TerminalColors.OKCYAN, '\nStarting cleanup routine', TerminalColors.ENDC)
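The recursion in `post_process` gives every remaining step a chance to run even when one fails: the failing step is logged, the pipeline resumes at `index + 1`, and the exception is re-raised once the tail has completed. A standalone trace of the same pattern (illustrative sketch, not the runner itself):

```python
# Resume-after-failure pattern as used by post_process
steps = ('a', 'boom', 'c')

def run_step(name):
    if name == 'boom':
        raise RuntimeError('step failed')
    print('ran', name)

def run_pipeline(index=0):
    try:
        for name in steps[index:]:
            run_step(name)
            index += 1
    except BaseException as exc:
        run_pipeline(index + 1)  # skip the failing step, finish the tail
        raise exc                # then re-raise so the caller still sees the failure

try:
    run_pipeline()
except RuntimeError as exc:
    print('surfaced:', exc)
# Output: ran a / ran c / surfaced: step failed
```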
@@ -1834,7 +1879,7 @@ def cleanup(self, continue_measurement=False):
             print('Stopping containers')
             for container_id in self.__containers:
                 subprocess.run(['docker', 'rm', '-f', container_id], check=True, stderr=subprocess.DEVNULL)
-            self.__containers = {}
+            self.__containers.clear()

             print('Removing network')
             for network_name in self.__networks:
@@ -1862,15 +1907,19 @@ def cleanup(self, continue_measurement=False):
         self.__start_measurement_seconds = None
         self.__notes_helper = Notes()
-        self.__phases = OrderedDict()
+        self.__stdout_logs.clear()
+        self.__phases.clear()
         self.__end_measurement = None
         self.__join_default_network = False
         #self.__filename = self._original_filename # # we currently do not use this variable
+        self.__services_to_pause_phase.clear()
+        self.__join_default_network = False
+        self.__docker_params.clear()
         self.__working_folder = self._repo_folder
         self.__working_folder_rel = ''
-        self.__image_sizes = {}
-        self.__volume_sizes = {}
-        self.__warnings = []
+        self.__image_sizes.clear()
+        self.__volume_sizes.clear()
+        self.__warnings.clear()

         print(TerminalColors.OKBLUE, '-Cleanup gracefully completed', TerminalColors.ENDC)
@@ -1971,83 +2020,14 @@ def run(self):
                 self.identify_invalid_run()

             except BaseException as exc:
-                self.add_to_log(exc.__class__.__name__, str(exc))
+                self.add_to_log(f"{exc.__class__.__name__} (ID: {id(exc)})", str(exc))
                 self.set_run_failed()
                 raise exc
             finally:
                 try:
-                    self.end_measurement(skip_on_already_ended=True) # end_measurement can already been set if error happens in check_process_returncodes
-                    if self.__phases:
-                        last_phase_name, _ = next(reversed(self.__phases.items()))
-                        if self.__phases[last_phase_name].get('end', None) is None:
-                            self.__phases[last_phase_name]['end'] = int(time.time_ns() / 1_000)
-
-                        # Also patch Runtime phase separately, which we need as this will only get set after all child runtime phases
-                        if self.__phases.get('[RUNTIME]', None) is not None and self.__phases['[RUNTIME]'].get('end', None) is None:
-                            self.__phases['[RUNTIME]']['end'] = int(time.time_ns() / 1_000)
-
-                    self.store_phases()
-                    self.read_container_logs()
-                except BaseException as exc:
-                    self.add_to_log(exc.__class__.__name__, str(exc))
-                    self.set_run_failed()
-                    raise exc
+                    self.post_process(0)
                 finally:
-                    try:
-                        self.stop_metric_providers()
-                    except BaseException as exc:
-                        self.add_to_log(exc.__class__.__name__, str(exc))
-                        self.set_run_failed()
-                        raise exc
-                    finally:
-                        try:
-                            self.read_and_cleanup_processes()
-                        except BaseException as exc:
-                            self.add_to_log(exc.__class__.__name__, str(exc))
-                            self.set_run_failed()
-                            raise exc
-                        finally:
-                            try:
-                                self.save_notes_runner()
-                            except BaseException as exc:
-                                self.add_to_log(exc.__class__.__name__, str(exc))
-                                self.set_run_failed()
-                                raise exc
-                            finally:
-                                try:
-                                    self.save_stdout_logs()
-                                except BaseException as exc:
-                                    self.add_to_log(exc.__class__.__name__, str(exc))
-                                    self.set_run_failed()
-                                    raise exc
-                                finally:
-                                    try:
-                                        self.save_warnings()
-                                    except BaseException as exc:
-                                        self.add_to_log(exc.__class__.__name__, str(exc))
-                                        self.set_run_failed()
-                                        raise exc
-                                    finally:
-                                        try:
-                                            if self._run_id and self._dev_no_phase_stats is False and self._dev_no_save is False:
-                                                # After every run, even if it failed, we want to generate phase stats.
-                                                # They will not show the accurate data, but they are still neded to understand how
-                                                # much a failed run has accrued in total energy and carbon costs
-                                                print(TerminalColors.HEADER, '\nCalculating and storing phases data. This can take a couple of seconds ...', TerminalColors.ENDC)
-
-                                                # get all the metrics from the measurements table grouped by metric
-                                                # loop over them issuing separate queries to the DB
-                                                from tools.phase_stats import build_and_store_phase_stats # pylint: disable=import-outside-toplevel
-                                                build_and_store_phase_stats(self._run_id, self._sci)
-
-                                        except BaseException as exc:
-                                            self.add_to_log(exc.__class__.__name__, str(exc))
-                                            self.set_run_failed()
-                                            raise exc
-                                        finally:
-                                            self.cleanup() # always run cleanup automatically after each run
-
-
+                    self.cleanup()

         print(TerminalColors.OKGREEN, arrows('MEASUREMENT SUCCESSFULLY COMPLETED'), TerminalColors.ENDC)
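The usage scenario that follows exercises the reworked `read-sci-stdout` path twice in one flow. Each `GMT_SCI_R=` line matched by the anchored regex adds onto `self._sci['R']`, so the two commands contribute 500 + 600 = 1100 functional units in total, which is presumably why the assertion window in the new test (8 to 20) sits well below the single-command one (50 to 150): a similar amount of carbon divided by a larger R. Also note the behavioural tightening: `^...$` with `re.MULTILINE` now requires the marker on its own line, whereas the old per-line `findall` also matched mid-line. In miniature (standalone sketch of the diff's loop):

```python
import re

sci = {'R': 0}
stdout = "some stress-ng output\nGMT_SCI_R=500\nGMT_SCI_R=600\n"

# Same anchored pattern as in the diff; markers must occupy a whole line
for match in re.findall(r'^GMT_SCI_R=(\d+)$', stdout, re.MULTILINE):
    sci['R'] += int(match)

print(sci['R'])  # 1100
```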
diff --git a/tests/data/usage_scenarios/stress_sci_multi.yml b/tests/data/usage_scenarios/stress_sci_multi.yml
new file mode 100644
index 000000000..71eb887ff
--- /dev/null
+++ b/tests/data/usage_scenarios/stress_sci_multi.yml
@@ -0,0 +1,29 @@
+---
+name: Test Stress
+author: Dan Mateas
+description: test
+
+sci:
+  R_d: Cool run
+
+services:
+  test-container:
+    type: container
+    image: gcb_stress
+    build:
+      context: ../stress-application
+
+flow:
+  - name: Stress
+    container: test-container
+    commands:
+      - type: console
+        shell: sh
+        command: stress-ng -c 1 -t 1 -q; echo GMT_SCI_R=500
+        log-stdout: True
+        read-sci-stdout: True
+      - type: console
+        shell: sh
+        command: stress-ng -c 1 -t 1 -q; echo GMT_SCI_R=600
+        log-stdout: True
+        read-sci-stdout: True
\ No newline at end of file
diff --git a/tests/lib/test_phase_stats.py b/tests/lib/test_phase_stats.py
index eaf6591e4..0e33ce56c 100644
--- a/tests/lib/test_phase_stats.py
+++ b/tests/lib/test_phase_stats.py
@@ -410,3 +410,17 @@ def test_sci_run():
     assert len(data) == 1
     assert 50 < data[0]['value'] < 150
     assert data[0]['unit'] == 'ugCO2e/Cool run'
+
+def test_sci_multi_steps_run():
+    runner = ScenarioRunner(uri=GMT_ROOT_DIR, uri_type='folder', filename='tests/data/usage_scenarios/stress_sci_multi.yml', skip_system_checks=True, dev_cache_build=True, dev_no_sleeps=True, dev_no_metrics=False, dev_no_phase_stats=False)
+
+    out = io.StringIO()
+    err = io.StringIO()
+    with redirect_stdout(out), redirect_stderr(err):
+        run_id = runner.run()
+
+    data = DB().fetch_all("SELECT value, unit FROM phase_stats WHERE phase = %s AND run_id = %s AND metric = 'software_carbon_intensity_global' ", params=('004_[RUNTIME]', run_id), fetch_mode='dict')
+
+    assert len(data) == 1
+    assert 8 < data[0]['value'] < 20
+    assert data[0]['unit'] == 'ugCO2e/Cool run'
diff --git a/tests/lib/test_save_notes.py b/tests/lib/test_save_notes.py
index 11f737e7d..a67e11c3a 100644
--- a/tests/lib/test_save_notes.py
+++ b/tests/lib/test_save_notes.py
@@ -13,23 +13,23 @@
 ]

-@pytest.mark.parametrize("run_id,note,detail,timestamp", invalid_test_data)
-def test_invalid_timestamp(run_id, note, detail, timestamp):
+@pytest.mark.parametrize("run_id,note,detail_name,timestamp", invalid_test_data)
+def test_invalid_timestamp(run_id, note, detail_name, timestamp):
     with pytest.raises(ValueError) as err:
         notes = Notes()
-        notes.add_note({"note": note,"detail_name": detail,"timestamp": timestamp,})
+        notes.add_note(note, detail_name, timestamp)
         notes.save_to_db(run_id)

     expected_exception = "invalid literal for int"
     assert expected_exception in str(err.value), \
         Tests.assertion_info(f"Exception: {expected_exception}", str(err.value))

-@pytest.mark.parametrize("run_id,note,detail,timestamp", valid_test_data)
+@pytest.mark.parametrize("run_id,note,detail_name,timestamp", valid_test_data)
 @patch('lib.db.DB.query')
-def test_valid_timestamp(mock_query, run_id, note, detail, timestamp):
+def test_valid_timestamp(mock_query, run_id, note, detail_name, timestamp):
     mock_query.return_value = None # Replace with the desired return value
     notes = Notes()
-    notes.add_note({"note": note, "detail_name": detail, "timestamp": timestamp})
+    notes.add_note(note, detail_name, timestamp)
     notes.save_to_db(run_id)
     mock_query.assert_called_once()
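For completeness, the reworked `Notes` API in one place: positional `add_note` for single entries replaces the old dict argument, and `parse_and_add_notes` handles timestamp-prefixed blobs (sketch; the commented `run_id` is a placeholder):

```python
import time

from lib.notes import Notes

notes = Notes()

# Single note: positional (note, detail_name, timestamp) replaces the old dict argument
notes.add_note('Build finished', '[NOTES]', int(time.time_ns() / 1_000))

# Bulk parsing: every '<16-digit-us-timestamp> <text>' line in a blob becomes a note
notes.parse_and_add_notes('my container', '1234567890123456 My note')

# notes.save_to_db(run_id)  # run_id placeholder; timestamps are cast with int() on insert
```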