|
16 | 16 | limitations under the License. |
17 | 17 | """ |
18 | 18 |
|
| 19 | +import csv |
| 20 | +import datetime |
19 | 21 | import functools |
| 22 | +import os |
20 | 23 | import re |
21 | 24 | from behave.model import ScenarioOutline |
22 | 25 | from behave.model_core import Status |
23 | 26 | from copy import deepcopy |
| 27 | +from toolium.driver_wrappers_pool import DriverWrappersPool |
24 | 28 |
|
25 | 29 |
|
26 | 30 | def get_accuracy_and_executions_from_tags(tags, accuracy_data_len=None): |
@@ -90,26 +94,38 @@ def patch_scenario_with_accuracy(context, scenario, data_key_suffix, accuracy=0. |
90 | 94 | def scenario_run_with_accuracy(context, scenario_run, scenario, *args, **kwargs): |
91 | 95 | # Execute the scenario multiple times and count passed executions |
92 | 96 | passed_executions = skipped_executions = 0 |
| 97 | + results = [] |
| 98 | + |
93 | 99 | # Copy scenario steps to avoid modifications in each execution, especially when using behave variables |
94 | 100 | # transformation, like map_param and replace_param methods |
95 | 101 | orig_steps = deepcopy(scenario.steps) |
96 | 102 | for execution in range(executions): |
97 | 103 | context.logger.info(f"Running accuracy scenario execution ({execution+1}/{executions})") |
| 104 | + # Store execution data in context |
98 | 105 | store_execution_data(context, execution, data_key_suffix) |
99 | 106 | # Restore original steps before each execution |
100 | 107 | scenario.steps = deepcopy(orig_steps) |
101 | | - if not scenario_run(*args, **kwargs): |
102 | | - if scenario.status == Status.skipped: |
103 | | - skipped_executions += 1 |
104 | | - status = "SKIPPED" |
105 | | - else: |
106 | | - passed_executions += 1 |
107 | | - status = "PASSED" |
108 | | - else: |
| 108 | + |
| 109 | + # Run scenario |
| 110 | + scenario_result = scenario_run(*args, **kwargs) |
| 111 | + |
| 112 | + # Store and log execution result |
| 113 | + if scenario_result: |
109 | 114 | status = "FAILED" |
| 115 | + elif scenario.status == Status.skipped: |
| 116 | + skipped_executions += 1 |
| 117 | + status = "SKIPPED" |
| 118 | + else: |
| 119 | + passed_executions += 1 |
| 120 | + status = "PASSED" |
| 121 | + results.append({'data': context.storage["accuracy_execution_data"], 'status': status, |
| 122 | + 'message': get_error_message_from_scenario(scenario)}) |
110 | 123 | print(f"ACCURACY SCENARIO {status}: execution {execution+1}/{executions}") |
111 | 124 | context.logger.info(f"Accuracy scenario execution {status} ({execution+1}/{executions})") |
112 | 125 |
|
| 126 | + # Save results to CSV file |
| 127 | + save_accuracy_results_to_csv(context, scenario, results) |
| 128 | + |
113 | 129 | if executions == skipped_executions: |
114 | 130 | run_response = False # Run method returns true only when failed |
115 | 131 | context.logger.info("All accuracy scenario executions are skipped") |
@@ -220,3 +236,48 @@ def after_accuracy_scenario(context, scenario): |
220 | 236 | :param scenario: behave scenario |
221 | 237 | """ |
222 | 238 | pass |
| 239 | + |
| 240 | + |
def get_error_message_from_scenario(scenario):
    """
    Extract error message from failed scenario.

    :param scenario: behave scenario
    :return: error message string
    """
    # The scenario-level exception takes precedence when present
    if scenario.exception:
        return str(scenario.exception)
    # Otherwise report the first failed step's exception, or an empty string
    # when nothing failed (e.g. the scenario passed or was skipped)
    failed_step = next((step for step in scenario.steps if step.status == Status.failed), None)
    return str(failed_step.exception) if failed_step is not None else ""
| 254 | + |
| 255 | + |
def save_accuracy_results_to_csv(context, scenario, results):
    """
    Save accuracy test results to a CSV file.

    The file is written to <output_directory>/accuracy/results_<scenario>_<timestamp>.csv,
    one row per execution with its data, status and error message.

    :param context: behave context
    :param scenario: behave scenario
    :param results: list of execution results, each a dict with 'data', 'status' and 'message' keys
    """
    # Create output directory if it doesn't exist (exist_ok avoids a race with parallel runs)
    output_dir = os.path.join(DriverWrappersPool.output_directory, "accuracy")
    os.makedirs(output_dir, exist_ok=True)

    # Generate filename with timestamp and scenario info; replace every character that is
    # not filename-safe (spaces, commas, path separators, colons, ...) with an underscore
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    scenario_name = re.sub(r'[^\w.-]', '_', scenario.name)
    csv_filename = os.path.join(output_dir, f"results_{scenario_name}_{timestamp}.csv")

    # Write execution results to file; this is best effort, so any I/O error is logged
    # instead of failing the accuracy scenario itself
    try:
        with open(csv_filename, mode='w', newline='', encoding='utf-8') as csv_file:
            csv_writer = csv.writer(csv_file)
            csv_writer.writerow(['Execution data', 'Status', 'Message'])
            for result in results:
                csv_writer.writerow([result['data'], result['status'], result['message']])
        context.logger.info(f"Accuracy results saved to: {csv_filename}")
    except Exception as e:
        context.logger.error(f"Error saving accuracy results to CSV: {e}")
0 commit comments