import shlex
import sys
import urllib.parse
+import datetime
+import logging
+
+from math import ceil

-import requests
import yaml
+import requests

from shrub.config import Configuration
from shrub.command import CommandDefinition
from shrub.task import TaskDependency
from shrub.variant import DisplayTaskDefinition
from shrub.variant import TaskSpec
+from shrub.operations import CmdTimeoutUpdate
+
+from evergreen.api import RetryingEvergreenApi

# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
from buildscripts import resmokelib
from buildscripts.ciconfig import evergreen
from buildscripts.client import evergreen as evergreen_client
+from buildscripts.util import teststats
# pylint: enable=wrong-import-position

+LOGGER = logging.getLogger(__name__)
+
API_REST_PREFIX = "/rest/v1/"
API_SERVER_DEFAULT = "https://evergreen.mongodb.com"
+AVG_TEST_RUNTIME_ANALYSIS_DAYS = 14
+AVG_TEST_TIME_MULTIPLIER = 3
+CONFIG_FILE = "../src/.evergreen.yml"
REPEAT_SUITES = 2
EVERGREEN_FILE = "etc/evergreen.yml"
+MIN_AVG_TEST_OVERFLOW_SEC = 60
+MIN_AVG_TEST_TIME_SEC = 5 * 60
# The executor_file and suite_files defaults are required to make the suite resolver work
# correctly.
SELECTOR_FILE = "etc/burn_in_tests.yml"
@@ -97,6 +112,9 @@ def parse_command_line():
    parser.add_option("--reportFile", dest="report_file", default="report.json",
                      help="Write a JSON file with test results. Default is '%default'.")

+    parser.add_option("--project", dest="project", default="mongodb-mongo-master",
+                      help="The project the test history will be requested for.")
+
    parser.add_option("--testListFile", dest="test_list_file", default=None, metavar="TESTLIST",
                      help="Load a JSON file with tests to run.")

@@ -461,7 +479,101 @@ def _get_run_buildvariant(options):
    return options.buildvariant


-def create_generate_tasks_config(evg_config, options, tests_by_task, include_gen_task):
+def _parse_avg_test_runtime(test, task_avg_test_runtime_stats):
+    """
+    Parse the list of test stats to find the runtime of a particular test.
+
+    :param test: Test name.
+    :param task_avg_test_runtime_stats: Teststat data.
+    :return: Historical average runtime of the test.
+    """
+    for test_stat in task_avg_test_runtime_stats:
+        if test_stat.test_name == test:
+            return test_stat.runtime
+    return None
+
+
+def _calculate_timeout(avg_test_runtime):
+    """
+    Calculate timeout_secs for the Evergreen task.
+
+    :param avg_test_runtime: How long a test has historically taken to run.
+    :return: The test runtime times AVG_TEST_TIME_MULTIPLIER, or MIN_AVG_TEST_TIME_SEC (whichever
+        is higher).
+    """
+    return max(MIN_AVG_TEST_TIME_SEC, ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER))
+
+
+def _calculate_exec_timeout(options, avg_test_runtime):
+    """
+    Calculate exec_timeout_secs for the Evergreen task.
+
+    :param options: Command line options (repeat_tests_secs is used here).
+    :param avg_test_runtime: How long a test has historically taken to run.
+    :return: repeat_tests_secs plus enough padding time for the test to finish its final run.
+    """
+    test_execution_time_over_limit = avg_test_runtime - (
+        options.repeat_tests_secs % avg_test_runtime)
+    test_execution_time_over_limit = max(MIN_AVG_TEST_OVERFLOW_SEC, test_execution_time_over_limit)
+    return ceil(options.repeat_tests_secs +
+                (test_execution_time_over_limit * AVG_TEST_TIME_MULTIPLIER))
+
+
+def _generate_timeouts(options, commands, test, task_avg_test_runtime_stats):
+    """
+    Add a timeout.update command to the list of commands for a burn in execution task.
+
+    :param options: Command line options.
+    :param commands: List of commands for a burn in execution task.
+    :param test: Test name.
+    :param task_avg_test_runtime_stats: Teststat data.
+    """
+    if task_avg_test_runtime_stats:
+        avg_test_runtime = _parse_avg_test_runtime(test, task_avg_test_runtime_stats)
+        if avg_test_runtime:
+            cmd_timeout = CmdTimeoutUpdate()
+            LOGGER.debug("Avg test runtime for test %s is: %s", test, avg_test_runtime)
+
+            timeout = _calculate_timeout(avg_test_runtime)
+            cmd_timeout.timeout(timeout)
+
+            exec_timeout = _calculate_exec_timeout(options, avg_test_runtime)
+            cmd_timeout.exec_timeout(exec_timeout)
+
+            commands.append(cmd_timeout.validate().resolve())
+
+
+def _get_task_runtime_history(evergreen_api, project, task, variant):
+    """
+    Fetch the historical average runtime for all tests in a task from the Evergreen API.
+
+    :param evergreen_api: Evergreen API.
+    :param project: Project name.
+    :param task: Task name.
+    :param variant: Variant name.
+    :return: Test historical runtimes, parsed into teststat objects.
+    """
+    try:
+        end_date = datetime.datetime.utcnow().replace(microsecond=0)
+        start_date = end_date - datetime.timedelta(days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
+        data = evergreen_api.test_stats_by_project(
+            project, after_date=start_date.strftime("%Y-%m-%d"),
+            before_date=end_date.strftime("%Y-%m-%d"), tasks=[task], variants=[variant],
+            group_by="test", group_num_days=AVG_TEST_RUNTIME_ANALYSIS_DAYS)
+        test_runtimes = teststats.TestStats(data).get_tests_runtimes()
+        LOGGER.debug("Test_runtime data parsed from Evergreen history: %s", test_runtimes)
+        return test_runtimes
+    except requests.HTTPError as err:
+        if err.response.status_code == requests.codes.SERVICE_UNAVAILABLE:
+            # Evergreen may return a 503 when the service is degraded.
+            # Fall back to returning no test history.
+            return []
+        else:
+            raise
+
+
+def create_generate_tasks_config(evergreen_api, evg_config, options, tests_by_task,
+                                 include_gen_task):
    """Create the config for the Evergreen generate.tasks file."""
    # pylint: disable=too-many-locals
    task_specs = []
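
To make the timeout arithmetic above concrete, here is a small worked sketch with illustrative numbers only (a 240-second historical average and a 600-second repeat_tests_secs budget; neither value comes from this patch):

# Illustrative only: walk through the _calculate_timeout / _calculate_exec_timeout
# math with made-up inputs, using the constants defined at the top of the file.
from math import ceil

AVG_TEST_TIME_MULTIPLIER = 3
MIN_AVG_TEST_TIME_SEC = 5 * 60
MIN_AVG_TEST_OVERFLOW_SEC = 60

avg_test_runtime = 240   # hypothetical historical average, in seconds
repeat_tests_secs = 600  # hypothetical repeat budget from the command line

# timeout_secs: 3 * 240 = 720, which is above the 300-second floor, so 720.
timeout_secs = max(MIN_AVG_TEST_TIME_SEC, ceil(avg_test_runtime * AVG_TEST_TIME_MULTIPLIER))

# The last repetition starts at 480 s and would overrun the 600 s budget by
# 240 - (600 % 240) = 120 s; pad that overflow by the multiplier: 600 + 3 * 120 = 960.
overflow = max(MIN_AVG_TEST_OVERFLOW_SEC,
               avg_test_runtime - (repeat_tests_secs % avg_test_runtime))
exec_timeout_secs = ceil(repeat_tests_secs + overflow * AVG_TEST_TIME_MULTIPLIER)

assert (timeout_secs, exec_timeout_secs) == (720, 960)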
@@ -470,6 +582,8 @@ def create_generate_tasks_config(evg_config, options, tests_by_task, include_gen
    task_names.append(BURN_IN_TESTS_GEN_TASK)
    for task in sorted(tests_by_task):
        multiversion_path = tests_by_task[task].get("use_multiversion")
+        task_avg_test_runtime_stats = _get_task_runtime_history(evergreen_api, options.project,
+                                                                task, options.buildvariant)
        for test_num, test in enumerate(tests_by_task[task]["tests"]):
            sub_task_name = _sub_task_name(options, task, test_num)
            task_names.append(sub_task_name)
@@ -485,6 +599,7 @@ def create_generate_tasks_config(evg_config, options, tests_by_task, include_gen
                    get_resmoke_repeat_options(options), test),
            }
            commands = []
+            _generate_timeouts(options, commands, test, task_avg_test_runtime_stats)
            commands.append(CommandDefinition().function("do setup"))
            if multiversion_path:
                run_tests_vars["task_path_suffix"] = multiversion_path
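
The net effect on each generated sub-task is that its command list now begins with a timeout override ahead of the usual setup function. A rough sketch of the resulting fragment of the generate.tasks config (the exact key layout depends on how shrub serializes CmdTimeoutUpdate, so treat this shape as an assumption; the numbers carry over from the sketch above):

# Assumed shape of one generated sub-task's first two commands; the
# timeout.update params mirror the values computed by _generate_timeouts.
commands_fragment = [
    {"command": "timeout.update", "params": {"exec_timeout_secs": 960, "timeout_secs": 720}},
    {"func": "do setup"},
]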
@@ -525,11 +640,11 @@ def create_tests_by_task(options):
    return tests_by_task


-def create_generate_tasks_file(options, tests_by_task):
+def create_generate_tasks_file(evergreen_api, options, tests_by_task):
    """Create the Evergreen generate.tasks file."""

    evg_config = Configuration()
-    evg_config = create_generate_tasks_config(evg_config, options, tests_by_task,
+    evg_config = create_generate_tasks_config(evergreen_api, evg_config, options, tests_by_task,
                                              include_gen_task=True)
    _write_json_file(evg_config.to_map(), options.generate_tasks_file)

@@ -561,9 +676,15 @@ def run_tests(no_exec, tests_by_task, resmoke_cmd, report_file):
        _write_json_file(test_results, report_file)


-def main():
+def main(evergreen_api):
    """Execute Main program."""

+    logging.basicConfig(
+        format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
+        level=logging.DEBUG,
+        stream=sys.stdout,
+    )
+
    options, args = parse_command_line()

    resmoke_cmd = _set_resmoke_cmd(options, args)
@@ -585,10 +706,10 @@ def main():
        _write_json_file(tests_by_task, options.test_list_outfile)

    if options.generate_tasks_file:
-        create_generate_tasks_file(options, tests_by_task)
+        create_generate_tasks_file(evergreen_api, options, tests_by_task)
    else:
        run_tests(options.no_exec, tests_by_task, resmoke_cmd, options.report_file)


if __name__ == "__main__":
-    main()
+    main(RetryingEvergreenApi.get_api(config_file=CONFIG_FILE))
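
Threading evergreen_api through main() also makes the flow exercisable without the real Evergreen service; a minimal sketch, assuming the script lives at buildscripts/burn_in_tests.py and using a hypothetical stub in place of RetryingEvergreenApi:

from buildscripts.burn_in_tests import main


class StubEvergreenApi(object):
    """Hypothetical stand-in for RetryingEvergreenApi, for offline runs."""

    def test_stats_by_project(self, project, **kwargs):
        # No history: _get_task_runtime_history should then yield no runtimes,
        # so _generate_timeouts leaves the default task timeouts untouched.
        return []


main(StubEvergreenApi())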