|
| 1 | +#!/usr/bin/env python3 |
| 2 | + |
| 3 | +# Copyright 2023 SCION Association |
| 4 | +# |
| 5 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | +# you may not use this file except in compliance with the License. |
| 7 | +# You may obtain a copy of the License at |
| 8 | +# |
| 9 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | +# |
| 11 | +# Unless required by applicable law or agreed to in writing, software |
| 12 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | +# See the License for the specific language governing permissions and |
| 15 | +# limitations under the License. |
| 16 | + |
| 17 | +import logging |
| 18 | +import json |
| 19 | +import yaml |
| 20 | +from http.client import HTTPConnection |
| 21 | +from urllib.parse import urlencode |
| 22 | +from plumbum import cli |
| 23 | +from plumbum.cmd import cat, grep, wc |
| 24 | + |
| 25 | +from acceptance.common import base, docker |
| 26 | + |
| 27 | +logger = logging.getLogger(__name__) |
| 28 | + |
| 29 | +# This test relies on a specific topology router_bm.topo. |
| 30 | +# This topology is 1 core AS with two children and one core AS with none like so: |
| 31 | +# |
| 32 | +# CoreAS-A CoreAS-B |
| 33 | +# BR-A1 BR-A2 ---- BR-B |
| 34 | +# | | |
| 35 | +# BR-C BR-D |
| 36 | +# AS-C AS-D |
| 37 | + |
# Those values are valid expectations only when running in the CI environment.
# Keys are the traffic types reported by the router metrics (the same type
# labels that _run() collects into rateMap); values are the minimum-expected
# packets/(machine*s) rates (the check allows a 20% margin below these).
EXPECTATIONS = {
    'in': 53000,
    'out': 26000,
    'in_transit': 73000,
    'out_transit': 49000,
    'br_transit': 73000,
}
| 46 | + |
| 47 | + |
class Test(base.TestTopogen):
    """
    Tests that the performance of the router is within a satisfying (TBD) range.
    The test runs in a bespoke topology (router_bm.topo, see the diagram above).

    It generates load twice with end2end_integration (as-transit, then
    br-transit), samples per-traffic-type packet rates from the local
    Prometheus, and — when running with --ci — fails if any rate drops more
    than 20% below EXPECTATIONS.
    """

    ci = cli.Flag(
        "ci",
        help="Do extra checks for CI",
        envname="CI"
    )

    def setup_prepare(self):
        """Distribute the host's cores among the routers via GOMAXPROCS.

        The base schema is expressed as fractions of 12 cores; it is scaled
        to the actually available core count and rounded down, with a floor
        of 1 core per router.
        """
        super().setup_prepare()

        # The expected topology for this test is well-known: see router_bm.topo
        # This test is configured to match.

        availCores = int((cat['/proc/cpuinfo'] | grep['processor\\s:'] | wc['-l'])())

        # Base schema: child routers 2/12 each (x2), far router 2/12 (x1),
        # center routers 3/12 each (x2). Scale, truncate, clamp to >= 1.
        childRouterCores = max(1, int(2 * availCores / 12))
        farRouterCores = max(1, int(2 * availCores / 12))
        centerRouterCores = max(1, int(3 * availCores / 12))

        availCores -= (2 * childRouterCores + 2 * centerRouterCores + farRouterCores)

        # The truncations can leave us with up to 4 extra cores. Give first to
        # the center routers, if there's enough for both of them.
        if availCores > 1:
            availCores -= 2
            centerRouterCores += 1

        # The leftovers go to childRouterCores, even if it means allocating one
        # extraneous core.
        if availCores > 0:
            childRouterCores += 1

        coreCountUpdates = {
            'br1-ff00_0_110-1': centerRouterCores,
            'br1-ff00_0_110-2': centerRouterCores,
            'br1-ff00_0_111-1': childRouterCores,
            'br1-ff00_0_112-1': childRouterCores,
            'br2-ff00_0_120-1': farRouterCores,
        }

        # Edit GOMAXPROCS for all routers in the docker compose file.
        # safe_load suffices: the generated compose file is plain YAML.
        scion_dc = self.artifacts / "gen/scion-dc.yml"
        with open(scion_dc, "r") as file:
            dc = yaml.safe_load(file)

        for router, coreCnt in coreCountUpdates.items():
            dc["services"][router]["environment"]["GOMAXPROCS"] = f"{coreCnt}"

        with open(scion_dc, "w") as file:
            yaml.dump(dc, file)

    def setup(self):
        """Bring up the topology plus the monitoring (Prometheus) stack."""
        super().setup()
        self.monitoring_dc = docker.Compose(compose_file=self.artifacts / "gen/monitoring-dc.yml")
        self.monitoring_dc("up", "-d")

    def _run_loadtest(self, subset: str):
        """Run one end2end_integration load and return (beg, end) timestamps.

        beg/end are the integer unix times (as reported on the loadtest's
        "metricsBegin ... metricsEnd ..." output line) bracketing the real
        action.

        Raises:
            RuntimeError: if the loadtest output contains no metrics line
                (previously this surfaced as a confusing NameError later on).
        """
        loadtest = self.get_executable("end2end_integration")
        retCode, stdOut, stdErr = loadtest[
            "-d",
            "-outDir", self.artifacts,
            "-name", "router_benchmark",
            "-cmd", "./bin/end2endblast",
            "-attempts", 1500000,
            "-timeout", "120s",  # Timeout is for all attempts together
            "-parallelism", 100,
            "-subset", subset
        ].run_tee()

        beg = end = None
        for line in stdOut.splitlines():
            if line.startswith('metricsBegin'):
                _, beg, _, end = line.split()
        if beg is None or end is None:
            raise RuntimeError('loadtest output contains no metricsBegin line')
        return int(beg), int(end)

    def _query_prometheus(self, query: str, sampleTime=None):
        """Run an instant query on the local Prometheus.

        Args:
            query: the PromQL expression.
            sampleTime: optional evaluation timestamp (unix seconds).

        Returns:
            The list under data.result in the JSON response.

        Raises:
            RuntimeError: on any non-200 HTTP response.
        """
        params = {'query': query}
        if sampleTime is not None:
            params['time'] = f'{sampleTime}'
        conn = HTTPConnection("localhost:9090")
        try:
            conn.request('GET', f'/api/v1/query?{urlencode(params)}')
            resp = conn.getresponse()
            if resp.status != 200:
                raise RuntimeError(f'Unexpected response: {resp.status} {resp.reason}')
            pld = json.loads(resp.read().decode('utf-8'))
        finally:
            conn.close()
        return pld['data']['result']

    def _run(self):
        # Give some time for the topology to start.
        self.await_connectivity()

        # Start as-transiting load. With the router_bm topology

        # The subset noncore#nonlocalcore gives us outgoing traffic at each
        # child, incoming traffic at BR-B, AS-transit-in traffic at BR-A1,
        # and AS-transit-out traffic at BR-A2. There is a small amount of
        # in and out traffic everywhere, on top of that produced by the test.
        # We only consider the routers involved in the test. Those see much
        # higher rates... we use that to isolate them in the results without
        # having to compare instance labels with the topology data.
        logger.info("==> Starting load as-transit")
        beg, end = self._run_loadtest("noncore#core#remoteISD")

        logger.info('==> Collecting in/out/as-transit performance metrics...')

        # The raw metrics are expressed in terms of core*seconds. We convert to
        # machine*seconds which allows us to provide a projected packet/s;
        # ...more intuitive than packets/core*s. We measure the rate over 10s.
        # For best results we sample the end of the middle 10s of the run.
        # "beg" is the start time of the real action and "end" is the end time.
        sampleTime = (beg + end + 10) / 2
        results = self._query_prometheus(
            'sum by (instance, job, type) ('
            '  rate(router_output_pkts_total{job="BR"}[10s])'
            ')'
            '/ on (instance, job) group_left()'
            'sum by (instance, job) ('
            '  1 - (rate(process_runnable_seconds_total[10s])'
            '  / go_sched_maxprocs_threads)'
            ')',
            sampleTime)

        rateMap = {}
        for result in results:
            tt = result['metric']['type']
            ts, val = result['value']
            # 0 values should not enter in any averaging. In this test, a very
            # low rate means that the router wasn't involved in the test for
            # that traffic type. "Out" traffic is the only one that exists at
            # two routers. To cover that case, we average the rates for a given
            # traffic type.
            # TODO: figure a more reliable way to identify the tested routers.
            r = int(float(val))
            if r < 5000:  # Not a router of interest.
                continue
            rateMap.setdefault(tt, []).append(r)
        for tt, rates in rateMap.items():
            rateMap[tt] = int(sum(rates) / len(rates))

        # Start br-transiting load.
        # The subset noncore#noncore gives us a mix of in and out traffic at
        # the children and pure BR-transit traffic at BR-A1.
        logger.info("==> Starting load br-transit")
        beg, end = self._run_loadtest("noncore#noncore#remoteAS")

        logger.info('==> Collecting br-transit performance metrics...')

        # Same conversion as above; here we're interested only in br_transit
        # traffic, again sampled at the end of the middle 10s of the run.
        sampleTime = (beg + end + 10) / 2
        results = self._query_prometheus(
            'sum by (instance, job) ('
            '  rate(router_output_pkts_total{job="BR", type="br_transit"}[10s])'
            ')'
            '/ on (instance, job) group_left()'
            'sum by (instance, job) ('
            '  1 - (rate(process_runnable_seconds_total[10s])'
            '  / go_sched_maxprocs_threads)'
            ')',
            sampleTime)

        # There's only one router that has br_transit traffic.
        rateMap['br_transit'] = 0
        for result in results:
            ts, val = result['value']
            r = int(float(val))
            if r != 0:
                rateMap['br_transit'] = r

        # Fetch and log the number of cores used by Go. This may inform
        # performance modeling later.
        logger.info('==> Collecting number of cores...')
        results = self._query_prometheus('go_sched_maxprocs_threads{job="BR"}')
        for result in results:
            instance = result['metric']['instance']
            _, val = result['value']
            logger.info(f'Router Cores for {instance}: {int(val)}')

        # Log and check the performance...
        # If this is used as a CI test, make sure that the performance is
        # within the expected ballpark.
        rateTooLow = []
        for tt, exp in EXPECTATIONS.items():
            # A missing traffic type means no router of interest reported it;
            # report it as insufficient rather than crashing with a KeyError.
            rate = rateMap.get(tt, 0)
            if self.ci:
                logger.info(f'Packets/(machine*s) for {tt}: {rate} expected: {exp}')
                if rate < 0.8 * exp:
                    rateTooLow.append(tt)
            else:
                logger.info(f'Packets/(machine*s) for {tt}: {rate}')

        if len(rateTooLow) != 0:
            raise RuntimeError(f'Insufficient performance for: {rateTooLow}')

    def teardown(self):
        """Stop the monitoring stack, then tear down the topology."""
        self.monitoring_dc("down")
        super().teardown()
| 299 | + |
| 300 | + |
# Allow running this acceptance test as a standalone script.
if __name__ == '__main__':
    base.main(Test)
0 commit comments