1 | 1 | #!/usr/bin/env python3
2 | 2 |
3 | 3 | import argparse
| 4 | +import json |
4 | 5 | import os |
5 | 6 | import yaml |
6 | 7 | import subprocess |
@@ -348,215 +349,112 @@ def parse_suite(suite_path, parent_suite_path, options, settings, name=None):
348 | 349 | sys.exit(1) |
349 | 350 | return suite |
350 | 351 |
351 | | -def write_report_result_tree(file, includes, data, depth): |
352 | | - for test in data['suite']: |
353 | | - indent = ' ' * depth |
354 | | - stars = '*' + '*' * depth
355 | | -
356 | | - string = f"{indent}" |
357 | | - string += f"{stars}" |
358 | | - string += f" {resultfmt(test)}" |
359 | | - if 'outfile' in test: |
360 | | - string += f" <<output-{test['unix_name']},{test['uniq_id']} {test['name']}>>" |
361 | | - else: |
362 | | - string += f" {test['uniq_id']} {test['name']}" |
363 | | - |
364 | | - file.write(f"{string}\n") |
365 | | - |
366 | | - if 'suite' in test: |
367 | | - write_report_result_tree(file, includes, test, depth + 1) |
368 | | - |
369 | | -def resultfmt(test): |
370 | | - result = test.get('result', 'unknown') |
371 | | - if result == 'masked-fail': |
372 | | - return "[.fail line-through]#FAIL#" |
373 | | - elif result == 'masked-skip': |
374 | | - return "[.skip line-through]#SKIP#" |
375 | | - else: |
376 | | - return f"[.{result}]#{result.upper()}#" |
| 352 | +def collect_test_logs(test_data, depth=0): |
| 353 | + """Recursively collect all test logs and embed them in the data structure.""" |
| 354 | + if not test_data.get('suite'): |
| 355 | + return |
377 | 356 |
378 | | -def write_report_output(file, data, depth, is_first=True): |
379 | | - """For each test in suite, write specification¹, result, and output""" |
380 | | - for test in data['suite']: |
| 357 | + for test in test_data['suite']: |
381 | 358 | if 'outfile' in test: |
382 | | - # Add page break before each test, except first one
383 | | - if is_first: |
384 | | - is_first = False |
385 | | - else: |
386 | | - file.write("\n<<<\n") |
387 | | - |
388 | | - # Test heading is always from 'name:' in the suite file |
389 | | - file.write(f"\n[[output-{test['unix_name']}]]\n") |
390 | | - file.write(f"\n=== {resultfmt(test)} {test['name']}\n") |
391 | | - |
392 | | - # Skip headnig from test spec. |
393 | | - if 'test-spec' in test: |
394 | | - file.write(f"include::{test['test-spec']}[lines=2..-1]\n") |
395 | | - |
396 | | - # Add test information table |
397 | | - file.write("\n==== Test Information\n") |
398 | | - file.write('[cols="1h,3"]\n') |
399 | | - file.write("|===\n") |
400 | | - file.write(f"| ID | `{test['uniq_id']}`\n") |
401 | | - file.write(f"| Name | `{test['name']}`\n") |
402 | | - |
403 | | - # Add test file path (relative to project root) |
404 | | - if 'case' in test: |
405 | | - rel_path = os.path.relpath(test['case'], ROOT_PATH) |
406 | | - file.write(f"| File | `{rel_path}`\n") |
407 | | - |
408 | | - # Add arguments if present |
409 | | - if 'options' in test and test['options']: |
410 | | - args_str = ', '.join(test['options']) |
411 | | - file.write(f"| Arguments | `{args_str}`\n") |
412 | | - else: |
413 | | - file.write("| Arguments | `None`\n") |
414 | | - |
415 | | - file.write("|===\n") |
416 | | - |
417 | | - file.write("\n==== Output\n") |
418 | | - file.write("----\n") |
419 | | - file.write(f"include::{test['outfile']}[]\n") |
420 | | - file.write("----\n") |
| 359 | + log_path = os.path.join(LOGDIR, test['outfile']) |
| 360 | + try: |
| 361 | + with open(log_path, 'r') as f: |
| 362 | + test['logs'] = f.read() |
| 363 | + except FileNotFoundError: |
| 364 | + test['logs'] = f"Log file not found: {log_path}" |
| 365 | + except Exception as e: |
| 366 | + test['logs'] = f"Error reading log file {log_path}: {e}" |
421 | 367 |
422 | 368 | if 'suite' in test: |
423 | | - is_first = write_report_output(file, test, depth + 1, is_first) |
424 | | - |
425 | | - return is_first |
426 | | - |
427 | | -def write_report_project_info(file, config): |
428 | | - if 'PROJECT-NAME' not in config or 'PROJECT-ROOT' not in config: |
429 | | - return None |
430 | | - |
431 | | - name = config['PROJECT-NAME'] |
432 | | - root = config['PROJECT-ROOT'] |
433 | | - version = run_git_cmd(root, ["describe", "--tags", "--always"]) |
434 | | - sha = run_git_cmd(root, ['rev-parse', 'HEAD'])[:12] |
435 | | - |
436 | | - file.write(f"\n=== {name} Info\n\n") |
437 | | - |
438 | | - file.write('[cols="1h,2", width=30%]\n') |
439 | | - file.write("|===\n") |
440 | | - file.write(f"| Version | {version}\n") |
441 | | - file.write(f"| SHA | {sha}\n") |
442 | | - |
443 | | - file.write("|===\n") |
| 369 | + collect_test_logs(test, depth + 1) |
| 370 | + |
| 371 | +def calculate_test_summary(test_data): |
| 372 | + """Calculate summary statistics for all tests.""" |
| 373 | + counts = { |
| 374 | + 'pass': 0, |
| 375 | + 'fail': 0, |
| 376 | + 'skip': 0, |
| 377 | + 'masked_fail': 0, |
| 378 | + 'masked_skip': 0, |
| 379 | + 'total': 0 |
| 380 | + } |
444 | 381 |
445 | | -def write_report_test_info(file, data): |
446 | | - pass_count = 0 |
447 | | - fail_count = 0 |
448 | | - skip_count = 0 |
449 | | - masked_fail_count = 0 |
450 | | - masked_skip_count = 0 |
| 382 | + def count_tests(data): |
| 383 | + if not data.get('suite'): |
| 384 | + return |
451 | 385 |
452 | | - def count_tests(suite_data): |
453 | | - nonlocal pass_count, fail_count, skip_count, masked_fail_count, masked_skip_count |
454 | | - for test in suite_data['suite']: |
| 386 | + for test in data['suite']: |
455 | 387 | if 'suite' in test: |
456 | 388 | # This is a sub-suite, recurse but don't count it |
457 | 389 | count_tests(test) |
458 | 390 | elif 'result' in test: |
459 | 391 | # This is a leaf test case, count it |
460 | | - if test['result'] == 'pass': |
461 | | - pass_count += 1 |
462 | | - elif test['result'] == 'fail': |
463 | | - fail_count += 1 |
464 | | - elif test['result'] == 'skip': |
465 | | - skip_count += 1 |
466 | | - elif test['result'] == 'masked-fail': |
467 | | - masked_fail_count += 1 |
468 | | - elif test['result'] == 'masked-skip': |
469 | | - masked_skip_count += 1 |
470 | | - |
471 | | - count_tests(data) |
472 | | - |
473 | | - file.write("\n=== Test Overview\n\n") |
474 | | - file.write('[cols="1h,2", width=30%]\n') |
475 | | - file.write("|===\n") |
476 | | - file.write(f"| {resultfmt({'result': 'pass'})} | {pass_count}\n") |
477 | | - file.write(f"| {resultfmt({'result': 'fail'})} | {fail_count}\n") |
478 | | - file.write(f"| {resultfmt({'result': 'skip'})} | {skip_count}\n") |
479 | | - file.write(f"| {resultfmt({'result': 'masked-fail'})} | {masked_fail_count}\n") |
480 | | - file.write(f"| {resultfmt({'result': 'masked-skip'})} | {masked_skip_count}\n") |
481 | | - |
482 | | - total_count = pass_count + fail_count + skip_count + masked_fail_count + masked_skip_count |
483 | | - file.write(f"| *TOTAL* | *{total_count}*\n") |
484 | | - file.write("|===\n") |
485 | | - |
486 | | - includes = [] |
487 | | - write_report_result_tree(file, includes, data, 0) |
488 | | - |
489 | | -def write_report(data, config): |
490 | | - with open(os.path.join(LOGDIR, 'report.adoc'), 'a') as file: |
491 | | - current_date = datetime.now().strftime("%Y-%m-%d") |
492 | | - name = config['PROJECT-NAME'] if 'PROJECT-NAME' in config else "9pm" |
493 | | - root = config['PROJECT-ROOT'] |
494 | | - topdoc = config['PROJECT-TOPDOC'] + "/" if 'PROJECT-TOPDOC' in config else "" |
495 | | - version = run_git_cmd(root, ["describe", "--tags", "--always"]) |
496 | | - |
497 | | - file.write(":title-page:\n") |
498 | | - file.write(f":topdoc: {topdoc}\n") |
499 | | - file.write("ifdef::logo[]\n") # Optional -a logo=PATH from asciidoctor-pdf |
500 | | - file.write(":title-logo-image: {logo}\n") |
501 | | - file.write("endif::[]\n") |
502 | | - file.write(":toc:\n") |
503 | | - file.write(":toclevels: 2\n") |
504 | | - file.write(":sectnums:\n") |
505 | | - file.write(":sectnumlevels: 2\n") |
506 | | - file.write(":pdfmark:\n") |
507 | | - file.write(":pdf-page-size: A4\n") |
508 | | - file.write(":pdf-page-layout: portrait\n") |
509 | | - file.write(":pdf-page-margin: [1in, 0.5in]\n") |
510 | | - file.write(f":keywords: regression, test, testing, 9pm, {name}\n") |
511 | | - file.write(":subject: Regression testing\n") |
512 | | - file.write(":autofit-option:\n") |
513 | | - file.write("\n") |
514 | | - |
515 | | - file.write(f"= Test Report\n") |
516 | | - file.write(f"{name} {version}\n") |
517 | | - file.write(f"{current_date}\n") |
518 | | - |
519 | | - file.write("\n<<<\n") |
520 | | - file.write("\n== Test Summary\n\n") |
521 | | - write_report_project_info(file, config) |
522 | | - write_report_test_info(file, data) |
523 | | - |
524 | | - file.write("\n<<<\n") |
525 | | - file.write("\n== Test Result\n\n") |
526 | | - write_report_output(file, data, 0) |
527 | | - |
528 | | - |
529 | | -def write_github_result_tree(file, data, depth): |
530 | | - icon_map = { |
531 | | - "pass": ":white_check_mark:", |
532 | | - "fail": ":red_circle:", |
533 | | - "skip": ":large_orange_diamond:", |
534 | | - "masked-fail": ":o:", |
535 | | - "masked-skip": ":small_orange_diamond:", |
| 392 | + result = test['result'] |
| 393 | + if result == 'pass': |
| 394 | + counts['pass'] += 1 |
| 395 | + elif result == 'fail': |
| 396 | + counts['fail'] += 1 |
| 397 | + elif result == 'skip': |
| 398 | + counts['skip'] += 1 |
| 399 | + elif result == 'masked-fail': |
| 400 | + counts['masked_fail'] += 1 |
| 401 | + elif result == 'masked-skip': |
| 402 | + counts['masked_skip'] += 1 |
| 403 | + counts['total'] += 1 |
| 404 | + |
| 405 | + count_tests(test_data) |
| 406 | + return counts |
| 407 | + |
| 408 | +def write_json_result(data, config): |
| 409 | + """Write comprehensive JSON result file with embedded logs.""" |
| 410 | + # Collect all test logs and embed them in the data structure |
| 411 | + collect_test_logs(data) |
| 412 | + |
| 413 | + # Calculate summary statistics |
| 414 | + summary = calculate_test_summary(data) |
| 415 | + |
| 416 | + # Prepare metadata |
| 417 | + current_time = datetime.now() |
| 418 | + project_info = {} |
| 419 | + if config: |
| 420 | + if 'PROJECT-NAME' in config: |
| 421 | + project_info['name'] = config['PROJECT-NAME'] |
| 422 | + if 'PROJECT-ROOT' in config: |
| 423 | + project_info['root'] = config['PROJECT-ROOT'] |
| 424 | + # Get git info |
| 425 | + version = run_git_cmd(config['PROJECT-ROOT'], ["describe", "--tags", "--always"]) |
| 426 | + sha = run_git_cmd(config['PROJECT-ROOT'], ['rev-parse', 'HEAD']) |
| 427 | + project_info['version'] = version |
| 428 | + project_info['sha'] = sha |
| 429 | + if 'PROJECT-TOPDOC' in config: |
| 430 | + project_info['topdoc'] = config['PROJECT-TOPDOC'] |
| 431 | + |
| 432 | + # Get 9pm version info |
| 433 | + ninepm_sha = run_git_cmd(ROOT_PATH, ['rev-parse', 'HEAD']) |
| 434 | + |
| 435 | + # Build complete JSON structure |
| 436 | + json_data = { |
| 437 | + 'metadata': { |
| 438 | + 'timestamp': current_time.isoformat(), |
| 439 | + 'date': current_time.strftime("%Y-%m-%d"), |
| 440 | + 'project': project_info, |
| 441 | + 'environment': { |
| 442 | + '9pm_version': ninepm_sha[:10] if ninepm_sha else 'unknown', |
| 443 | + 'log_dir': LOGDIR, |
| 444 | + 'scratch_dir': SCRATCHDIR, |
| 445 | + 'root_path': ROOT_PATH |
| 446 | + } |
| 447 | + }, |
| 448 | + 'summary': summary, |
| 449 | + 'suite': data |
536 | 450 | } |
537 | | - for test in data['suite']: |
538 | | - mark = icon_map.get(test['result'], "") |
539 | | - file.write(f"{' ' * depth}- {mark} : {test['uniq_id']} {test['name']}\n") |
540 | 451 |
541 | | - if 'suite' in test: |
542 | | - write_github_result_tree(file, test, depth + 1) |
543 | | - |
544 | | -def write_github_result(data): |
545 | | - with open(os.path.join(LOGDIR, 'result-gh.md'), 'a') as file: |
546 | | - file.write("# Test Result\n") |
547 | | - write_github_result_tree(file, data, 0) |
| 452 | + # Write JSON file |
| 453 | + json_path = os.path.join(LOGDIR, 'result.json') |
| 454 | + with open(json_path, 'w') as f: |
| 455 | + json.dump(json_data, f, indent=2, ensure_ascii=False) |
548 | 456 |
549 | | -def write_md_result_tree(file, data, depth): |
550 | | - for test in data['suite']: |
551 | | - file.write(f"{' ' * depth}- {test['result'].upper()} : {test['uniq_id']} {test['name']}\n") |
552 | | - |
553 | | - if 'suite' in test: |
554 | | - write_md_result_tree(file, test, depth + 1) |
555 | | - |
556 | | -def write_md_result(data): |
557 | | - with open(os.path.join(LOGDIR, 'result.md'), 'a') as file: |
558 | | - file.write("# Test Result\n") |
559 | | - write_md_result_tree(file, data, 0) |
| 457 | + return json_path |
560 | 458 |
561 | 459 | def print_result_tree(data, base): |
562 | 460 | i = 1 |
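
Note on the new output: write_json_result() replaces the AsciiDoc, Markdown, and GitHub writers with a single result.json in LOGDIR whose layout is fixed by the hunk above: metadata, summary, and the nested suite tree, with log text embedded under each leaf test's 'logs' key by collect_test_logs(). A minimal consumer sketch follows, assuming it is run from the 9pm log directory; the walk() helper and its print format are illustrative only, not part of this change.

import json

# Sketch only: assumes the current directory is the 9pm log directory
# where write_json_result() placed result.json.
with open('result.json') as f:
    data = json.load(f)

print("Summary:", data['summary'])        # pass/fail/skip/masked_*/total counts

def walk(node, depth=0):
    # Every entry carries 'result', 'uniq_id' and 'name'; leaf tests that
    # produced output also carry the log text embedded by collect_test_logs().
    for test in node.get('suite', []):
        print(f"{'  ' * depth}{test.get('result', 'unknown').upper():12} "
              f"{test.get('uniq_id', '?')} {test.get('name', '?')}")
        if test.get('result') == 'fail' and 'logs' in test:
            print(test['logs'])
        if 'suite' in test:                # descend into sub-suites
            walk(test, depth + 1)

walk(data['suite'])
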
@@ -911,9 +809,10 @@ def main():
911 | 809 | cprint(pcolor.green, "\no Execution") |
912 | 810 |
913 | 811 | print_result_tree(suite, "") |
914 | | - write_md_result(suite) |
915 | | - write_github_result(suite) |
916 | | - write_report(suite, proj) |
| 812 | + |
| 813 | + # Export comprehensive JSON result |
| 814 | + json_path = write_json_result(suite, proj) |
| 815 | + vcprint(pcolor.faint, f"JSON results written to: {json_path}") |
917 | 816 |
918 | 817 | db.close() |
919 | 818 | sys.exit(err) |
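
The main() wiring above only prints where the JSON landed; how downstream jobs consume it is left open. One possibility is a CI gate that fails the build from the summary counts, sketched below; the exit policy is an assumption of this sketch, not behaviour added by the diff.

import json
import sys

# Hypothetical CI step: read the result.json written by write_json_result()
# and fail the job if any test failed. This is separate from the
# sys.exit(err) that 9pm itself already performs in main().
with open('result.json') as f:
    summary = json.load(f)['summary']

print(f"pass={summary['pass']} fail={summary['fail']} "
      f"skip={summary['skip']} total={summary['total']}")
sys.exit(1 if summary['fail'] else 0)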