@@ -5,13 +5,7 @@
 """Generate Buildkite performance pipelines dynamically"""
 import os
 
-from common import (
-    COMMON_PARSER,
-    get_changed_files,
-    group,
-    overlay_dict,
-    pipeline_to_json,
-)
+from common import COMMON_PARSER, group, overlay_dict, pipeline_to_json
 
 # In `devtool_opts`, we restrict both the set of CPUs on which the docker container's threads can run,
 # and its memory node. For the cpuset, we pick a continuous set of CPUs from a single NUMA node
@@ -96,37 +90,25 @@ def build_group(test):
     action="append",
 )
 
-RUN_TESTS = True
-if REVISION_A is not None:
-    changed_files = get_changed_files(f"{REVISION_A}..{REVISION_B}")
-    # Our A/B-Testing setup by design only A/B-tests firecracker binaries.
-    # So we only trigger A/B-tests on file changes that have impact on the firecracker
-    # binary. These include ".rs" files, "Cargo.toml" and "Cargo.lock" files, as well
-    # as ".cargo/config".
-    RUN_TESTS = any(
-        x.suffix in [".rs", ".toml", ".lock", "config"] for x in changed_files
-    )
-
 group_steps = []
 
-if RUN_TESTS:
-    args = parser.parse_args()
-    tests = [perf_test[test] for test in args.test or perf_test.keys()]
-    for test_data in tests:
-        test_data.setdefault("platforms", args.platforms)
-        test_data.setdefault("instances", args.instances)
-        # use ag=1 instances to make sure no two performance tests are scheduled on the same instance
-        test_data.setdefault("agents", {"ag": 1})
-        test_data = overlay_dict(test_data, args.step_param)
-        test_data["retry"] = {
-            "automatic": [
-                # Agent was lost, retry one time
-                # this can happen if we terminate the instance or the agent gets
-                # disconnected for whatever reason
-                {"exit_status": -1, "limit": 1},
-            ]
-        }
-        group_steps.append(build_group(test_data))
+args = parser.parse_args()
+tests = [perf_test[test] for test in args.test or perf_test.keys()]
+for test_data in tests:
+    test_data.setdefault("platforms", args.platforms)
+    test_data.setdefault("instances", args.instances)
+    # use ag=1 instances to make sure no two performance tests are scheduled on the same instance
+    test_data.setdefault("agents", {"ag": 1})
+    test_data = overlay_dict(test_data, args.step_param)
+    test_data["retry"] = {
+        "automatic": [
+            # Agent was lost, retry one time
+            # this can happen if we terminate the instance or the agent gets
+            # disconnected for whatever reason
+            {"exit_status": -1, "limit": 1},
+        ]
+    }
+    group_steps.append(build_group(test_data))
 
 pipeline = {
     "env": {},
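After this change the generator always builds the full set of performance-test groups; nothing downstream of `group_steps` changes. As a rough illustration of what the surrounding script does with those steps, here is a minimal, self-contained sketch. The toy `overlay_dict`, `build_group`, and `pipeline_to_json` below are assumptions standing in for the real helpers in `common.py` (only their call sites are visible in this diff), and the test entry is a made-up placeholder.

"""Sketch only: stand-ins for the common.py helpers used above.
The real implementations are not shown in this diff; behavior here is assumed."""
import json


def overlay_dict(base, update):
    # Assumed: merge CLI --step-param overrides on top of the test definition.
    merged = dict(base)
    merged.update(update or {})
    return merged


def build_group(test):
    # Assumed: wrap one perf test into a Buildkite group step, carrying the
    # agent pin and retry policy that the generator sets for every test.
    return {
        "group": test["label"],
        "steps": [
            {
                "command": test["command"],
                "agents": test["agents"],  # {"ag": 1} -> one perf test per instance
                "retry": test["retry"],    # retry once when the agent is lost (exit_status -1)
            }
        ],
    }


def pipeline_to_json(pipeline):
    # Assumed: plain JSON dump, ready for `buildkite-agent pipeline upload`.
    return json.dumps(pipeline, indent=2)


# Hypothetical test entry, mirroring how the loop above decorates each perf_test item.
test_data = {"label": "example-perf-test", "command": "<perf test command>", "agents": {"ag": 1}}
test_data = overlay_dict(test_data, {"timeout_in_minutes": 30})
test_data["retry"] = {"automatic": [{"exit_status": -1, "limit": 1}]}

print(pipeline_to_json({"env": {}, "steps": [build_group(test_data)]}))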