# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Generator

from model_analyzer.model_analyzer_exceptions import TritonModelAnalyzerException
from model_analyzer.config.input.config_command_profile import ConfigCommandProfile
from model_analyzer.result.run_config_measurement import RunConfigMeasurement

from math import log2

import logging
from model_analyzer.constants import (LOGGER_NAME, THROUGHPUT_MINIMUM_GAIN,
                                      THROUGHPUT_MINIMUM_CONSECUTIVE_CONCURRENCY_TRIES)

logger = logging.getLogger(LOGGER_NAME)


class ConcurrencySearch:
    """
    Generates the next concurrency value to use when searching through
    RunConfigMeasurements for the best value (according to the user's objective)
    - Sweeps by powers of two from the min to the max concurrency
    - If the user specifies constraints, performs a binary search around the
      boundary where a constraint was violated

    Invariant: the caller must add each new measurement as it is taken
    """
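
    # Usage sketch (illustrative only, not part of the original class). It assumes
    # a hypothetical `profile(concurrency)` helper that runs perf_analyzer at the
    # given concurrency and returns a RunConfigMeasurement (or None on failure):
    #
    #   search = ConcurrencySearch(config)
    #   for concurrency in search.search_concurrencies():
    #       search.add_run_config_measurement(profile(concurrency))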

    def __init__(self, config: ConfigCommandProfile) -> None:
        """
        Parameters
        ----------
        config: ConfigCommandProfile
            Profile configuration information
        """
        self._min_concurrency_index = int(
            log2(config.run_config_search_min_concurrency))
        self._max_concurrency_index = int(
            log2(config.run_config_search_max_concurrency))
        self._max_binary_search_steps = config.run_config_search_max_binary_search_steps

        self._run_config_measurements: List[Optional[RunConfigMeasurement]] = []
        self._concurrencies: List[int] = []
        self._last_failing_concurrency = 0
        self._last_passing_concurrency = 0

    def add_run_config_measurement(
            self,
            run_config_measurement: Optional[RunConfigMeasurement]) -> None:
        """
        Adds a new RunConfigMeasurement
        Invariant: RCMs are assumed to be added in the same order they are measured
        """
        self._run_config_measurements.append(run_config_measurement)

    def search_concurrencies(self) -> Generator[int, None, None]:
        """
        First performs a concurrency sweep, and then, if necessary, performs
        a binary concurrency search around the point where the constraint
        was violated
        """
        yield from self._perform_concurrency_sweep()

        if self._was_constraint_violated():
            yield from self._perform_binary_concurrency_search()

    def _perform_concurrency_sweep(self) -> Generator[int, None, None]:
        for concurrency in (2**i for i in range(
                self._min_concurrency_index, self._max_concurrency_index + 1)):
            if self._should_continue_concurrency_sweep():
                self._concurrencies.append(concurrency)
                yield concurrency
            else:
                logger.info(
                    "Terminating concurrency sweep - throughput is decreasing")
                return
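
    # For example (illustrative numbers): with run_config_search_min_concurrency=1
    # and run_config_search_max_concurrency=1024, the sweep above yields
    # 1, 2, 4, ..., 1024 (2**0 through 2**10), stopping early once the throughput
    # gain over the last few measurements saturates.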

    def _should_continue_concurrency_sweep(self) -> bool:
        self._check_measurement_count()

        if not self._are_minimum_tries_reached():
            return True
        else:
            return not self._has_objective_gain_saturated()

    def _check_measurement_count(self) -> None:
        if len(self._run_config_measurements) != len(self._concurrencies):
            raise TritonModelAnalyzerException(
                f"Internal Measurement count: {len(self._concurrencies)}, doesn't match number "
                f"of measurements added: {len(self._run_config_measurements)}.")

    def _are_minimum_tries_reached(self) -> bool:
        return (len(self._run_config_measurements) >=
                THROUGHPUT_MINIMUM_CONSECUTIVE_CONCURRENCY_TRIES)

    def _has_objective_gain_saturated(self) -> bool:
        gain = self._calculate_gain()
        return gain < THROUGHPUT_MINIMUM_GAIN

    def _calculate_gain(self) -> float:
        first_rcm = self._run_config_measurements[
            -THROUGHPUT_MINIMUM_CONSECUTIVE_CONCURRENCY_TRIES]

        best_rcm = self._get_best_rcm()

        # These cover the cases where we don't get a result from PA
        if not first_rcm and not best_rcm:
            return 0
        if not first_rcm:
            return 1
        elif not best_rcm:
            return -1
        else:
            gain = first_rcm.compare_measurements(best_rcm)

        return gain

    def _get_best_rcm(self) -> Optional[RunConfigMeasurement]:
        # Need to remove entries (None) with no result from PA before picking the best
        pruned_rcms = [
            rcm for rcm in self._run_config_measurements[
                -THROUGHPUT_MINIMUM_CONSECUTIVE_CONCURRENCY_TRIES:] if rcm
        ]
        best_rcm = max(pruned_rcms) if pruned_rcms else None

        return best_rcm

    def _was_constraint_violated(self) -> bool:
        for i in range(len(self._run_config_measurements) - 1, 1, -1):
            if self._at_constraint_failure_boundary(i):
                self._last_failing_concurrency = self._concurrencies[i]
                self._last_passing_concurrency = self._concurrencies[i - 1]
                return True

        # If no failure boundary was found, the first measurement itself may
        # have violated the constraints
        first_rcm = self._run_config_measurements[0]
        if first_rcm and not first_rcm.is_passing_constraints():
            self._last_failing_concurrency = self._concurrencies[0]
            self._last_passing_concurrency = 0
            return True
        else:
            return False

    def _at_constraint_failure_boundary(self, index: int) -> bool:
        current_rcm = self._run_config_measurements[index]
        previous_rcm = self._run_config_measurements[index - 1]

        if not current_rcm or not previous_rcm:
            return False

        at_failure_boundary = (not current_rcm.is_passing_constraints() and
                               previous_rcm.is_passing_constraints())

        return at_failure_boundary
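
    # Illustrative example: if constraints pass at concurrencies 1 through 32 but
    # fail at 64, the index of 64 is the failure boundary, and the binary search
    # below runs between the last passing concurrency (32) and the last failing
    # concurrency (64).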

    def _perform_binary_concurrency_search(self) -> Generator[int, None, None]:
        # This is needed because we are going to restart the search from the
        # concurrency that failed - so we expect this to be at the end of the list
        self._concurrencies.append(self._last_failing_concurrency)

        for _ in range(self._max_binary_search_steps):
            concurrency = self._determine_next_binary_concurrency()

            if concurrency != self._concurrencies[-1]:
                self._concurrencies.append(concurrency)
                yield concurrency

    def _determine_next_binary_concurrency(self) -> int:
        if not self._run_config_measurements[-1]:
            return 0

        if self._run_config_measurements[-1].is_passing_constraints():
            self._last_passing_concurrency = self._concurrencies[-1]
            concurrency = int(
                (self._last_failing_concurrency + self._concurrencies[-1]) / 2)
        else:
            self._last_failing_concurrency = self._concurrencies[-1]
            concurrency = int(
                (self._last_passing_concurrency + self._concurrencies[-1]) / 2)

        return concurrency
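
    # Worked example (illustrative numbers): suppose the sweep passed at 32 and
    # failed at 64. The binary search restarts from the failing value 64, so the
    # first midpoint is int((32 + 64) / 2) = 48. If 48 passes constraints, the
    # next midpoint is int((64 + 48) / 2) = 56; if 48 fails, the next is
    # int((32 + 48) / 2) = 40, and so on, for at most
    # run_config_search_max_binary_search_steps steps.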