# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for collecting loop unroll data."""

import base64
import io
import os
import tempfile
from typing import Dict, Optional, Tuple

import gin
import tensorflow as tf

from google.protobuf import struct_pb2  # pytype: disable=pyi-error
from compiler_opt.rl import compilation_runner
from compiler_opt.rl import corpus


@gin.configurable(module='runners')
class LoopUnrollRunner(compilation_runner.CompilationRunner):
  """Class for collecting data for loop partial unrolling.

  Usage:
    runner = LoopUnrollRunner(
        llvm_objcopy_path, parse_reward_script_path, latency_coefficient,
        clang_path=clang_path,
        moving_average_decay_rate=moving_average_decay_rate)
    policy_reward = runner.collect_data(
        ir_path, tf_policy_path, default_reward, moving_average_reward)
  """

  def __init__(self, llvm_objcopy_path: str, parse_reward_script_path: str,
               latency_coefficient: str, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._llvm_objcopy_path = llvm_objcopy_path
    self._parse_reward_script_path = parse_reward_script_path
    self._latency_coefficient = float(latency_coefficient)
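
  # These constructor arguments are gin-configurable under the `runners`
  # module. A minimal sketch of a binding, assuming hypothetical paths and a
  # hypothetical coefficient value:
  #   runners.LoopUnrollRunner.llvm_objcopy_path = '/path/to/llvm-objcopy'
  #   runners.LoopUnrollRunner.parse_reward_script_path = '/path/to/parse_reward.py'
  #   runners.LoopUnrollRunner.latency_coefficient = '0.5'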

  def compile_fn(
      self, module_spec: corpus.ModuleSpec, tf_policy_path: str,
      reward_only: bool, cancellation_manager: Optional[
          compilation_runner.WorkerCancellationManager]
  ) -> Dict[str, Tuple[tf.train.SequenceExample, float]]:
    """Runs loop unrolling for the given IR file under the given policy.

    Args:
      module_spec: a ModuleSpec.
      tf_policy_path: path to the TF policy directory on local disk.
      reward_only: whether to only return the reward (icache pressure and
        latency).
      cancellation_manager: handler for early termination by killing any
        running processes.

    Returns:
      For loop unrolling, the result is at module level. IWS and latency are
      already weighted by the probability of being executed (see
      parse_reward.py and the code embedded under AsmPrinter.cpp for more
      detail).

      Since the reward is calculated at a late stage in the compiler, after
      inlining, some functions may have been inlined and cannot be found for
      some loops, so we sum the rewards of all functions into a single
      float, reward_total.

      The function returns results in the format:
      {
        "loop1_key": (loop1_features, reward_total),
        "loop2_key": (loop2_features, reward_total),
        ...,
        "loopN_key": (loopN_features, reward_total)
      }
      - reward_total: the sum of IWS and latency over all functions in this
        module.

    Early return:
      The function returns early when the compiled module doesn't record any
      log, or the log file doesn't record any loop. This happens when
      `LoopUnrollPass` is not triggered, or when no loop triggered a
      "partial unroll" in the pass.
    """
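    # Overall flow, as implemented below: (1) compile with clang, logging
    # loop features and unroll decisions; (2) dump the `.llvm_block_data.`
    # section with llvm-objcopy; (3) parse the section into per-function
    # rewards; (4) pair each logged loop's features with reward_total.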
    working_dir = tempfile.mkdtemp()

    # The compiler logs the input features (loop properties) and the
    # decisions (unroll counts) to the specified log path.
    log_path = os.path.join(working_dir, 'log')

    # The compilation generates an object file, in which our augmentation
    # under AsmPrinter.cpp creates the section data `llvm_block_data`.
    object_path = os.path.join(working_dir, 'object')
    # llvm-objcopy extracts the section data from the object file.
    data_path = os.path.join(working_dir, 'data')
    # The reward-parsing script turns the section data into a parsed reward.
    parsed_reward_path = os.path.join(working_dir, 'parsed_reward')

    try:
      # Construct the command line to execute clang.
      command_line = []

      # Parameters for MLGO unroll.
      command_line.extend([self._clang_path] + list(module_spec.exec_cmd) + [
          '-mllvm', '-mlgo-unroll-mode=training',
          '-mllvm', '-mlgo-unroll-training-log=' + log_path,
          '-mllvm', '-calc-reward',
          '-o', object_path
      ])
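
      # The assembled invocation looks roughly like this (paths are the
      # temporaries created above):
      #   clang <module exec_cmd...> \
      #     -mllvm -mlgo-unroll-mode=training \
      #     -mllvm -mlgo-unroll-training-log=<working_dir>/log \
      #     -mllvm -calc-reward -o <working_dir>/object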

      # Under training mode:
      #   If a model path is provided, the compiler uses
      #   ModelUnderTrainingRunner; otherwise it uses NoInferenceModelRunner.
      if tf_policy_path:
        command_line.extend(
            ['-mllvm', '-mlgo-unroll-train-model=' + tf_policy_path])

      print('Command to execute clang: ', command_line)

      # Run clang.
      compilation_runner.start_cancellable_process(command_line,
                                                   self._compilation_timeout,
                                                   cancellation_manager)

      # A module may not generate a log if none of its loops reaches the
      # LoopUnroll decision. Return early if log_path cannot be found.
      if not os.path.exists(log_path):
        print('Early return, log file not found.')
        return {}

      # A log file may be empty when none of the loops reaches a partial
      # unroll decision. Return early if a log file is created but has
      # nothing inside.
      if os.path.getsize(log_path) == 0:
        print('Early return, log file contains nothing.')
        return {}

      # Run llvm-objcopy to extract the section data.
      command_line = [
          self._llvm_objcopy_path,
          '--dump-section=.llvm_block_data.=' + data_path, object_path
      ]
      print('Command to get section data: ', command_line)
      compilation_runner.start_cancellable_process(command_line,
                                                   self._compilation_timeout,
                                                   cancellation_manager)
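
      # The `.llvm_block_data.` section is emitted by our augmentation under
      # AsmPrinter.cpp and holds the per-function reward data (IWS and
      # latency) that the parsing step below consumes.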

      # Run parse_reward.py to get the reward.
      command_line = [
          self._parse_reward_script_path, data_path, parsed_reward_path
      ]
      print('Command to parse reward: ', command_line)
      compilation_runner.start_cancellable_process(command_line,
                                                   self._compilation_timeout,
                                                   cancellation_manager)

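      # Each line of the parsed reward file is expected to be of the form:
      #   <function_name>,<iws>,<latency>
      # e.g. "foo,1234.0,5.6" (illustrative values), matching the parsing
      # below.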
      # Sum the rewards of all functions into a single float.
      reward_total = 0.0
      with io.open(parsed_reward_path, 'r', encoding='utf-8') as reward_f:
        for line in reward_f:
          items = line.rstrip('\n').split(',')
          assert len(items) == 3
          # items[0] is the function name; it is currently unused.
          iws = float(items[1])
          latency = float(items[2])
          reward_total += iws + latency * self._latency_coefficient
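
      # In other words, with the coefficient applied as in the code above:
      #   reward_total = sum over functions f of
      #     IWS(f) + latency_coefficient * latency(f)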

      if reward_only:
        return {'default': (None, reward_total)}

      result = {}

      # Read the training log and fill it into the result.
      sequence_examples = struct_pb2.Struct()
      with io.open(log_path, 'rb') as log_f:
        sequence_examples.ParseFromString(log_f.read())

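      # The parsed log is a proto Struct whose fields map each loop's key to
      # a base64-encoded, serialized tf.train.SequenceExample; decode each
      # entry below.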
      for key, value in sequence_examples.fields.items():
        entry = tf.train.SequenceExample()
        entry.ParseFromString(base64.b64decode(value.string_value))

        # Skip entries that carry no feature lists; there is nothing to
        # train on for them.
        if not entry.HasField('feature_lists'):
          continue

        result[key] = (entry, reward_total)

    finally:
      tf.io.gfile.rmtree(working_dir)

    return result