|
1 | 1 | #!/usr/bin/env python3
|
2 | 2 | # -*- coding: utf-8 -*-
|
3 | 3 |
|
| 4 | +import glob |
4 | 5 | import sys
|
5 | 6 | import faulthandler
|
6 | 7 | faulthandler.enable(file=sys.__stderr__) # will catch segfaults and write to stderr
|
|
31 | 32 |
|
32 | 33 | parser.add_argument('--uri', type=str, help='The URI to get the usage_scenario.yml from. Can be either a local directory starting with / or a remote git repository starting with http(s)://')
|
33 | 34 | parser.add_argument('--branch', type=str, help='Optionally specify the git branch when targeting a git repository')
|
34 | | - parser.add_argument('--filename', type=str, default='usage_scenario.yml', help='An optional alternative filename if you do not want to use "usage_scenario.yml"') |
| 35 | + parser.add_argument('--filename', type=str, action='append', help='An optional alternative filename if you do not want to use "usage_scenario.yml". Multiple filenames can be provided (e.g. "--filename usage_scenario_1.yml --filename usage_scenario_2.yml"). Paths like ../usage_scenario.yml and wildcards like *.yml are supported. Duplicate filenames are allowed and will be processed multiple times.') |
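With `action='append'`, argparse collects one list entry per occurrence of the flag and leaves the attribute as `None` when the flag is never passed (and a non-`None` list `default` would be appended to rather than replaced, a known argparse quirk), which is why the default filename is applied later in the code instead of via `default=`. A minimal, standalone sketch of the flag behavior, separate from the runner's actual CLI:

```python
import argparse

# Standalone sketch of the append semantics used for --filename (illustration only).
parser = argparse.ArgumentParser()
parser.add_argument('--filename', type=str, action='append')

print(parser.parse_args([]).filename)
# -> None, so the caller must substitute 'usage_scenario.yml' itself

print(parser.parse_args(['--filename', 'a.yml', '--filename', 'a.yml', '--filename', '*.yml']).filename)
# -> ['a.yml', 'a.yml', '*.yml']: duplicates and wildcard patterns are passed through verbatim
```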
35 | 36 |
|
36 | 37 | parser.add_argument('--variables', nargs='+', help='Variables that will be replaced into the usage_scenario.yml file')
|
37 | 38 | parser.add_argument('--commit-hash-folder', help='Use a different folder than the repository root to determine the commit hash for the run')
|
|
57 | 58 | parser.add_argument('--dev-no-save', action='store_true', help='Will save no data to the DB. This implicitly activates --dev-no-phase-stats, --dev-no-metrics and --dev-no-optimizations')
|
58 | 59 | parser.add_argument('--print-phase-stats', type=str, help='Prints the stats for the given phase to the CLI for quick verification without the Dashboard. Try "[RUNTIME]" as argument.')
|
59 | 60 | parser.add_argument('--print-logs', action='store_true', help='Prints the container and process logs to stdout')
|
| 61 | + parser.add_argument('--iterations', type=int, default=1, help='Specify how many times each scenario should be run. Default is 1. With multiple files, all files are processed sequentially, then the entire sequence is repeated N times. Example: with files A.yml, B.yml and --iterations 2, the execution order is A, B, A, B.') |
| 62 | + |
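The ordering described in the help text falls out of plain list multiplication further down (`filenames = filenames * args.iterations`): the fully expanded file list is repeated as a whole rather than each file being repeated in place. A tiny sketch of that semantics:

```python
# Sketch of the --iterations ordering: the whole sequence repeats, files are not repeated in place.
filenames = ['A.yml', 'B.yml']
iterations = 2
print(filenames * iterations)  # ['A.yml', 'B.yml', 'A.yml', 'B.yml']
```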
60 | 63 |
|
61 | 64 | # Measurement settings
|
62 | 65 | parser.add_argument('--measurement-system-check-threshold', type=int, default=3, help='System check threshold that determines when to issue a warning and when to fail. When set to 3, runs fail only on errors; when set to 2, also on warnings; and when set to 1, also on pure info statements. Can be 1=INFO, 2=WARN or 3=ERROR')
|
|
125 | 128 | sys.exit(1)
|
126 | 129 | GlobalConfig(config_location=args.config_override)
|
127 | 130 |
|
128 | | - runner = ScenarioRunner(name=args.name, uri=args.uri, uri_type=run_type, filename=args.filename, |
129 | | - branch=args.branch, debug_mode=args.debug, allow_unsafe=args.allow_unsafe, |
130 | | - skip_system_checks=args.skip_system_checks, |
131 | | - skip_unsafe=args.skip_unsafe,verbose_provider_boot=args.verbose_provider_boot, |
132 | | - full_docker_prune=args.full_docker_prune, dev_no_sleeps=args.dev_no_sleeps, |
133 | | - dev_cache_build=args.dev_cache_build, dev_no_metrics=args.dev_no_metrics, dev_no_save=args.dev_no_save, |
134 | | - dev_flow_timetravel=args.dev_flow_timetravel, dev_no_optimizations=args.dev_no_optimizations, |
135 | | - docker_prune=args.docker_prune, dev_no_phase_stats=args.dev_no_phase_stats, user_id=args.user_id, |
136 | | - skip_volume_inspect=args.skip_volume_inspect, commit_hash_folder=args.commit_hash_folder, |
137 | | - usage_scenario_variables=variables_dict, phase_padding=not args.no_phase_padding, |
138 | | - measurement_system_check_threshold=args.measurement_system_check_threshold, |
139 | | - measurement_pre_test_sleep=args.measurement_pre_test_sleep, |
140 | | - measurement_idle_duration=args.measurement_idle_duration, |
141 | | - measurement_baseline_duration=args.measurement_baseline_duration, |
142 | | - measurement_post_test_sleep=args.measurement_post_test_sleep, |
143 | | - measurement_phase_transition_time=args.measurement_phase_transition_time, |
144 | | - measurement_wait_time_dependencies=args.measurement_wait_time_dependencies, |
145 | | - measurement_flow_process_duration=args.measurement_flow_process_duration, |
146 | | - measurement_total_duration=args.measurement_total_duration, |
147 | | - #disabled_metric_providers # this is intentionally not supported as the user can just edit the config in CLI mode and using another args="+" for parsing CLI is flaky |
148 | | - #allowed_run_args=user._capabilities['measurement']['orchestrators']['docker']['allowed_run_args'] # this is intentionally not supported as the user can just enter --allow-unsafe in CLI mode and using another args="+" for parsing CLI is flaky |
149 | | - ) |
| 131 | + # Use default filename if none provided |
| 132 | + filename_patterns = args.filename if args.filename else ['usage_scenario.yml'] |
| 133 | + using_default_filename = not args.filename |
| 134 | + |
| 135 | + filenames = [] |
| 136 | + for pattern in filename_patterns: |
| 137 | + if run_type == 'folder': |
| 138 | + # For local directories, look for files relative to the URI path |
| 139 | + search_pattern = os.path.join(args.uri, pattern) |
| 140 | + matches = glob.glob(search_pattern) |
| 141 | + # Convert absolute paths back to relative paths for ScenarioRunner |
| 142 | + valid_files = [] |
| 143 | + for match in matches: |
| 144 | + if os.path.isfile(match): |
| 145 | + # Convert absolute path back to relative path |
| 146 | + relative_path = os.path.relpath(match, args.uri) |
| 147 | + valid_files.append(relative_path) |
| 148 | + |
| 149 | + if not valid_files: |
| 150 | + if using_default_filename: |
| 151 | + print(TerminalColors.FAIL, f'Error: Default file not found: {pattern}. Search pattern: {search_pattern}', TerminalColors.ENDC) |
| 152 | + print('Please create the file or specify a different file with --filename') |
| 153 | + else: |
| 154 | + print(TerminalColors.FAIL, f'Error: No valid files found for --filename pattern: {pattern}. Search pattern: {search_pattern}', TerminalColors.ENDC) |
| 155 | + sys.exit(1) |
| 156 | + filenames.extend(valid_files) |
| 157 | + else: |
| 158 | + # For URLs, file validation will happen after checkout in ScenarioRunner |
| 159 | + # Just pass the pattern as-is since we can't validate files that don't exist locally yet |
| 160 | + filenames.append(pattern) |
| 161 | + |
| 162 | + # Execute the given usage scenarios multiple times (if iterations > 1) |
| 163 | + filenames = filenames * args.iterations |
| 164 | + |
| 165 | + runner = None |
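For local folder URIs the patterns are expanded with `glob` against the folder and then converted back to folder-relative paths, since ScenarioRunner expects the filename relative to the URI; for git URLs the pattern is passed through untouched because nothing exists locally before checkout. A simplified, standalone sketch of the expansion step (the example folder and patterns are hypothetical):

```python
import glob
import os

def expand_patterns(uri, patterns):
    """Expand glob patterns relative to a local folder and return folder-relative paths."""
    expanded = []
    for pattern in patterns:
        for match in glob.glob(os.path.join(uri, pattern)):
            if os.path.isfile(match):
                expanded.append(os.path.relpath(match, uri))
    return expanded

# Hypothetical usage:
# expand_patterns('/tmp/my-repo', ['*.yml', '../usage_scenario.yml'])
```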
150 | 166 |
|
151 | 167 | # Using a very broad exception makes sense in this case as we have excepted all the specific ones before
|
152 | 168 | #pylint: disable=broad-except
|
153 | 169 | try:
|
154 | | - run_id = runner.run() # Start main code |
155 | | - |
156 | | - # this code can live at a different position. |
157 | | - # From a user perspective it makes perfect sense to run both jobs directly after each other |
158 | | - # In a cloud setup it however makes sense to free the measurement machine as soon as possible |
159 | | - # So this code should be individually callable, separate from the runner |
160 | | - |
161 | | - if runner._dev_no_optimizations is False and runner._dev_no_save is False: |
162 | | - import optimization_providers.base # We need to import this here as we need the correct config file |
163 | | - print(TerminalColors.HEADER, '\nImporting optimization reporters ...', TerminalColors.ENDC) |
164 | | - optimization_providers.base.import_reporters() |
165 | | - |
166 | | - print(TerminalColors.HEADER, '\nRunning optimization reporters ...', TerminalColors.ENDC) |
167 | | - |
168 | | - optimization_providers.base.run_reporters(runner._user_id, runner._run_id, runner._tmp_folder, runner.get_optimizations_ignore()) |
169 | | - |
170 | | - if args.file_cleanup: |
171 | | - shutil.rmtree(runner._tmp_folder) |
172 | | - |
173 | | - if not runner._dev_no_save: |
174 | | - print(TerminalColors.OKGREEN,'\n\n####################################################################################') |
175 | | - print(f"Please access your report on the URL {GlobalConfig().config['cluster']['metrics_url']}/stats.html?id={runner._run_id}") |
176 | | - print('####################################################################################\n\n', TerminalColors.ENDC) |
177 | | - |
178 | | - |
179 | | - if args.print_phase_stats: |
180 | | - phase_stats = DB().fetch_all('SELECT metric, detail_name, value, type, unit FROM phase_stats WHERE run_id = %s and phase LIKE %s ', params=(runner._run_id, f"%{args.print_phase_stats}")) |
181 | | - print(f"Data for phase {args.print_phase_stats}") |
182 | | - for el in phase_stats: |
183 | | - print(el) |
184 | | - print('') |
185 | | - else: |
186 | | - print(TerminalColors.OKGREEN,'\n\n####################################################################################') |
187 | | - print('Run finished | --dev-no-save was active and nothing was written to DB') |
188 | | - print('####################################################################################\n\n', TerminalColors.ENDC) |
| 170 | + for filename in filenames: |
| 171 | + print(TerminalColors.OKBLUE, '\nRunning: ', filename, TerminalColors.ENDC) |
| 172 | + |
| 173 | + runner = ScenarioRunner(name=args.name, uri=args.uri, uri_type=run_type, filename=filename, |
| 174 | + branch=args.branch, debug_mode=args.debug, allow_unsafe=args.allow_unsafe, |
| 175 | + skip_system_checks=args.skip_system_checks, |
| 176 | + skip_unsafe=args.skip_unsafe,verbose_provider_boot=args.verbose_provider_boot, |
| 177 | + full_docker_prune=args.full_docker_prune, dev_no_sleeps=args.dev_no_sleeps, |
| 178 | + dev_cache_build=args.dev_cache_build, dev_no_metrics=args.dev_no_metrics, dev_no_save=args.dev_no_save, |
| 179 | + dev_flow_timetravel=args.dev_flow_timetravel, dev_no_optimizations=args.dev_no_optimizations, |
| 180 | + docker_prune=args.docker_prune, dev_no_phase_stats=args.dev_no_phase_stats, user_id=args.user_id, |
| 181 | + skip_volume_inspect=args.skip_volume_inspect, commit_hash_folder=args.commit_hash_folder, |
| 182 | + usage_scenario_variables=variables_dict, phase_padding=not args.no_phase_padding, |
| 183 | + measurement_system_check_threshold=args.measurement_system_check_threshold, |
| 184 | + measurement_pre_test_sleep=args.measurement_pre_test_sleep, |
| 185 | + measurement_idle_duration=args.measurement_idle_duration, |
| 186 | + measurement_baseline_duration=args.measurement_baseline_duration, |
| 187 | + measurement_post_test_sleep=args.measurement_post_test_sleep, |
| 188 | + measurement_phase_transition_time=args.measurement_phase_transition_time, |
| 189 | + measurement_wait_time_dependencies=args.measurement_wait_time_dependencies, |
| 190 | + measurement_flow_process_duration=args.measurement_flow_process_duration, |
| 191 | + measurement_total_duration=args.measurement_total_duration, |
| 192 | + #disabled_metric_providers # this is intentionally not supported as the user can just edit the config in CLI mode and using another args="+" for parsing CLI is flaky |
| 193 | + #allowed_run_args=user._capabilities['measurement']['orchestrators']['docker']['allowed_run_args'] # this is intentionally not supported as the user can just enter --allow-unsafe in CLI mode and using another args="+" for parsing CLI is flaky |
| 194 | + ) |
| 195 | + |
| 196 | + run_id = runner.run() # Start main code |
| 197 | + |
| 198 | + # this code can live at a different position. |
| 199 | + # From a user perspective it makes perfect sense to run both jobs directly after each other |
| 200 | + # In a cloud setup it however makes sense to free the measurement machine as soon as possible |
| 201 | + # So this code should be individually callable, separate from the runner |
| 202 | + if not runner._dev_no_optimizations and not runner._dev_no_save: |
| 203 | + import optimization_providers.base # We need to import this here as we need the correct config file |
| 204 | + print(TerminalColors.HEADER, '\nImporting optimization reporters ...', TerminalColors.ENDC) |
| 205 | + optimization_providers.base.import_reporters() |
| 206 | + print(TerminalColors.HEADER, '\nRunning optimization reporters ...', TerminalColors.ENDC) |
| 207 | + optimization_providers.base.run_reporters(runner._user_id, runner._run_id, runner._tmp_folder, runner.get_optimizations_ignore()) |
| 208 | + |
| 209 | + if args.file_cleanup: |
| 210 | + shutil.rmtree(runner._tmp_folder) |
| 211 | + |
| 212 | + if not runner._dev_no_save: |
| 213 | + print(TerminalColors.OKGREEN,'\n\n####################################################################################') |
| 214 | + print(f"Please access your report on the URL {GlobalConfig().config['cluster']['metrics_url']}/stats.html?id={runner._run_id}") |
| 215 | + print('####################################################################################\n\n', TerminalColors.ENDC) |
| 216 | + |
| 217 | + if args.print_phase_stats: |
| 218 | + phase_stats = DB().fetch_all('SELECT metric, detail_name, value, type, unit FROM phase_stats WHERE run_id = %s and phase LIKE %s ', params=(runner._run_id, f"%{args.print_phase_stats}")) |
| 219 | + print(f"Data for phase {args.print_phase_stats}") |
| 220 | + for el in phase_stats: |
| 221 | + print(el) |
| 222 | + print('') |
| 223 | + else: |
| 224 | + print(TerminalColors.OKGREEN,'\n\n####################################################################################') |
| 225 | + print('Run finished | --dev-no-save was active and nothing was written to DB') |
| 226 | + print('####################################################################################\n\n', TerminalColors.ENDC) |
189 | 227 |
|
190 | 228 | except KeyboardInterrupt:
|
191 | 229 | pass
|
192 | 230 | except FileNotFoundError as e:
|
193 | | - error_helpers.log_error('File or executable not found', exception_context=e.__context__, final_exception=e, run_id=runner._run_id) |
| 231 | + error_helpers.log_error('File or executable not found', exception_context=e.__context__, final_exception=e, run_id=runner._run_id if runner else None) |
194 | 232 | except subprocess.CalledProcessError as e:
|
195 | | - error_helpers.log_error('Command failed', stdout=e.stdout, stderr=e.stderr, exception_context=e.__context__, run_id=runner._run_id) |
| 233 | + error_helpers.log_error('Command failed', stdout=e.stdout, stderr=e.stderr, exception_context=e.__context__, run_id=runner._run_id if runner else None) |
196 | 234 | except RuntimeError as e:
|
197 | | - error_helpers.log_error('RuntimeError occured in runner.py', exception_context=e.__context__, final_exception=e, run_id=runner._run_id) |
| 235 | + error_helpers.log_error('RuntimeError occured in runner.py', exception_context=e.__context__, final_exception=e, run_id=runner._run_id if runner else None) |
198 | 236 | except BaseException as e:
|
199 | | - error_helpers.log_error('Base exception occured in runner.py', exception_context=e.__context__, final_exception=e, run_id=runner._run_id) |
| 237 | + error_helpers.log_error('Base exception occured in runner.py', exception_context=e.__context__, final_exception=e, run_id=runner._run_id if runner else None) |
200 | 238 | finally:
|
201 | | - if args.print_logs: |
| 239 | + if args.print_logs and runner: |
202 | 240 | for container_id_outer, std_out in runner.get_logs().items():
|
203 | 241 | print(f"Container logs of '{container_id_outer}':")
|
204 | 242 | print(std_out)
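Because the runner is now created inside the per-filename loop, `runner` can still be `None` when an exception fires before (or during) the first construction; the `run_id=... if runner else None` arguments and the `if args.print_logs and runner:` guard above cover exactly that window. A minimal, self-contained sketch of the pattern with placeholder names (the real code uses ScenarioRunner and error_helpers):

```python
# Placeholder class standing in for ScenarioRunner; illustration only.
class DemoRunner:
    _run_id = 'demo-run-id'
    def run(self):
        raise RuntimeError('simulated failure')

runner = None
try:
    runner = DemoRunner()  # may raise before the assignment in the real code
    runner.run()
except RuntimeError as exc:
    # runner is still None if construction itself failed, so guard the attribute access
    print('error:', exc, '/ run_id =', runner._run_id if runner else None)
finally:
    if runner:  # only touch runner attributes when one actually exists
        print('logs/cleanup for', runner._run_id)
```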
|
|