# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script for running Chromium-based benchmarks.

This script runs Chromium benchmarks to evaluate the performance of MLGO
regalloc models in a highly automated fashion. If requested, it will
automatically recompile LLVM with the specified release-mode model, recompile
the Chromium benchmarks using the correct MLGO model/advisor, and then run a
specified subset of those tests designed to minimize run-to-run variability.

Usage:
PYTHONPATH=$PYTHONPATH:. python3 \
  ./compiler_opt/benchmark/benchmark_chromium.py \
  --compile_tests \
  --advisor=release \
  --chromium_src_path=/chromium/src \
  --depot_tools_path=/depot_tools \
  --llvm_build_path=/llvm-build \
  --compile_llvm \
  --model_path=/tmp/model \
  --tensorflow_c_lib_path=/tmp/tensorflow \
  --chromium_build_path=./out/Release \
  --output_file=./output.json \
  --perf_counters=mem_uops_retired.all_loads \
  --perf_counters=mem_uops_retired.all_stores

Note that --perf_counters can be defined multiple times to collect more than
one performance counter. Also note that chromium_build_path is a relative
directory: it is relative to the Chromium source dir and is specified this way
because there appear to be problems building Chromium outside of the source
directory.
"""

import os
import shutil
import subprocess
import json

from absl import flags
from absl import app

from compiler_opt.benchmark import gtest_executable_utils
from compiler_opt.benchmark import benchmarking_utils

from typing import List, Dict, Union

FLAGS = flags.FLAGS

default_test_descriptions = [
    './compiler_opt/tools/chromium_test_descriptions/base_perftests.json',
    './compiler_opt/tools/chromium_test_descriptions/browser_tests.json',
    './compiler_opt/tools/chromium_test_descriptions/components_perftests.json'
]
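# Each test description file referenced above is a JSON object describing one
# gtest binary: at minimum it names the test executable (the 'executable' key
# read below) together with the list of tests to run from that binary. See the
# JSON files under compiler_opt/tools/chromium_test_descriptions for the exact
# schema consumed by gtest_executable_utils.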

flags.DEFINE_multi_string('test_description', default_test_descriptions,
                          '(Can be defined multiple times) A path to a test '
                          'description JSON file containing the test '
                          'executable and the tests to run')
flags.DEFINE_boolean('compile_tests', True,
                     'Whether or not to compile the tests from scratch')
flags.DEFINE_enum('advisor', None, ['release', 'default'],
                  'The advisor to use when compiling chromium')
flags.DEFINE_string('chromium_src_path', '/chromium/src',
                    'The path to the chromium source')
flags.DEFINE_string('depot_tools_path', '/depot_tools',
                    'The path to your depot tools checkout')
flags.DEFINE_string('llvm_build_path', '/llvm-build',
                    'The path to your llvm build')
flags.DEFINE_boolean('compile_llvm', True,
                     'Whether or not to compile llvm using the new model')
flags.DEFINE_boolean('llvm_use_incremental', True,
                     'Whether or not to use an incremental build while '
                     'compiling llvm')
flags.DEFINE_string('llvm_source_path', '/llvm-project',
                    'The root path of your local llvm-project checkout')
flags.DEFINE_string('model_path', '',
                    'The path to the model to use when compiling llvm')
flags.DEFINE_string('tensorflow_c_lib_path', '/tmp/tensorflow',
                    'The path to an extracted copy of the tensorflow c library')
flags.DEFINE_string('chromium_build_path', './out/Release',
                    'The chromium build path, relative to the chromium source '
                    'directory')
flags.DEFINE_string('output_file', 'output.json',
                    'The path to the output file (in JSON format)')
flags.DEFINE_integer('num_threads', 1,
                     'The number of threads to use when running benchmarks. '
                     'Should be used with caution')
flags.DEFINE_multi_string('perf_counters',
                          ['mem_uops_retired.all_loads',
                           'mem_uops_retired.all_stores'],
                          'The performance counters to use')


def build_chromium_tests(regalloc_advisor: str,
                         chromium_build_path: str,
                         chromium_source_path: str,
                         depot_tools_path: str,
                         llvm_build_path: str,
                         tests_to_build: List[str]):
  """Builds the chromium test suite.

  This function builds the specified chromium tests using the specified
  regalloc advisor. It configures some default build options using gn, as
  shown below in the gn_args list, and then builds the needed targets using
  autoninja.

  Args:
    regalloc_advisor: The regalloc advisor to use when compiling
    chromium_build_path: The path (relative to the chromium source dir) to use
      for building chromium
    chromium_source_path: The path to the chromium source
    depot_tools_path: The path to the root of your depot tools checkout
    llvm_build_path: The path to the root of the directory where llvm was built
    tests_to_build: A list of test targets that are to be built
  """
  chromium_absolute_build_path = os.path.join(chromium_source_path,
                                              chromium_build_path)
  if os.path.exists(chromium_absolute_build_path):
    shutil.rmtree(chromium_absolute_build_path)

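  # Point the Chromium build at the freshly built LLVM toolchain and pass the
  # chosen regalloc advisor to clang via CPPFLAGS so every compile picks it up.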
  new_environment = os.environ.copy()
  new_environment['PATH'] += ':' + depot_tools_path
  new_environment['CC'] = os.path.join(llvm_build_path, './bin/clang')
  new_environment['CXX'] = os.path.join(llvm_build_path, './bin/clang++')
  new_environment['AR'] = os.path.join(llvm_build_path, './bin/llvm-ar')
  new_environment['NM'] = os.path.join(llvm_build_path, './bin/llvm-nm')
  new_environment['CPPFLAGS'] = \
      f'-mllvm -regalloc-enable-advisor={regalloc_advisor}'

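  # gn arguments for a release-style build driven by the unbundled (custom)
  # toolchain configured above. ThinLTO, CFI, and the Chrome-specific clang
  # plugins are disabled since they are tied to Chromium's bundled toolchain
  # rather than the plain upstream clang used here.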
  gn_args = [
      'is_official_build=true',
      'use_thin_lto=false',
      'is_cfi=false',
      'use_cfi_icall=false',
      'use_cfi_cast=false',
      'clang_use_chrome_plugins=false',
      'is_debug=false',
      'symbol_level=0',
      'custom_toolchain=\\\"//build/toolchain/linux/unbundle:default\\\"',
      'host_toolchain=\\\"//build/toolchain/linux/unbundle:default\\\"'
  ]

  gn_args_string = '--args="'
  for arg in gn_args:
    gn_args_string += arg + ' '
  gn_args_string += '"'

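  # Run 'gn gen' from the Chromium source directory with the modified
  # environment. shell=True is required because the --args value assembled
  # above is a single quoted string.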
  gn_config_command = 'gn gen ' + chromium_build_path + ' ' + gn_args_string
  with subprocess.Popen(gn_config_command,
                        env=new_environment,
                        cwd=chromium_source_path,
                        shell=True) as gn_config_process:
    gn_config_process.wait()

  ninja_compile_command = ['autoninja', '-C', chromium_build_path]
  ninja_compile_command.extend(tests_to_build)
  with subprocess.Popen(ninja_compile_command,
                        env=new_environment,
                        cwd=chromium_source_path) as ninja_compile_process:
    ninja_compile_process.wait()


def run_tests(tests_to_run: List[Dict[str, Union[str, List[str]]]],
              chromium_absolute_build_path: str,
              num_threads: int,
              perf_counters: List[str]):
  """A utility to run a set of chromium tests.

  This function takes in a list of test descriptions, each containing the name
  of a chromium test target as well as a list of all the tests within that
  test executable that are to be run. It runs each test (in parallel if
  requested, which should only be done for performance counters that are not
  highly sensitive to such an environment) and collects the specified perf
  counters for each test.

  Args:
    tests_to_run: A list of python dictionaries containing the test
      descriptions
    chromium_absolute_build_path: The absolute path to the chromium build dir
    num_threads: The number of threads to use when running tests
    perf_counters: A list of perf-compatible performance counters

  Returns:
    A list of per-test results as produced by gtest_executable_utils.
  """
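  # Delegate the actual execution and perf counter collection for each test
  # suite to gtest_executable_utils, accumulating the per-test results.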
  test_data = []
  for test in tests_to_run:
    executable_path = os.path.join(chromium_absolute_build_path,
                                   test['executable'])
    test_data.extend(gtest_executable_utils.run_test_suite(test,
                                                           executable_path,
                                                           perf_counters,
                                                           num_threads))
  return test_data


def main(_):
  test_descriptions = []
  for test_description in FLAGS.test_description:
    with open(test_description, encoding='UTF-8') as test_description_file:
      print(test_description)
      test_descriptions.append(json.load(test_description_file))
  test_executables = []
  for test_description in test_descriptions:
    test_executables.append(test_description['executable'])

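  # Optionally rebuild LLVM with the requested model first so that the
  # Chromium build below picks up the new regalloc advisor.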
  if FLAGS.compile_llvm:
    benchmarking_utils.build_llvm(FLAGS.model_path,
                                  FLAGS.llvm_use_incremental,
                                  FLAGS.llvm_build_path,
                                  FLAGS.llvm_source_path,
                                  FLAGS.tensorflow_c_lib_path)

  if FLAGS.compile_tests:
    build_chromium_tests(FLAGS.advisor,
                         FLAGS.chromium_build_path,
                         FLAGS.chromium_src_path,
                         FLAGS.depot_tools_path,
                         FLAGS.llvm_build_path,
                         test_executables)

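  # Run the selected test suites against the freshly built binaries and
  # collect the requested perf counters.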
  chromium_absolute_build_path = os.path.join(FLAGS.chromium_src_path,
                                              FLAGS.chromium_build_path)
  test_data = run_tests(test_descriptions,
                        chromium_absolute_build_path,
                        FLAGS.num_threads,
                        FLAGS.perf_counters)

  with open(FLAGS.output_file, 'w', encoding='UTF-8') as output_file:
    output_data = {
        'benchmarks': test_data
    }
    output_file.write(json.dumps(output_data, indent=4))


if __name__ == '__main__':
  app.run(main)