
Commit 5ba83f0

Removed references to inlining in general-purpose places (#240)
1 parent 0889219 commit 5ba83f0

File tree

compiler_opt/rl/regalloc/regalloc_runner.py
compiler_opt/rl/train_bc.py
compiler_opt/rl/train_locally.py
compiler_opt/tools/generate_vocab.py

4 files changed: +12 −13 lines changed


compiler_opt/rl/regalloc/regalloc_runner.py

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@ class RegAllocRunner(compilation_runner.CompilationRunner):
     runner = RegAllocRunner(
         clang_path, launcher_path, moving_average_decay_rate)
     serialized_sequence_example, default_reward, moving_average_reward,
-    policy_reward = inliner.collect_data(
+    policy_reward = runner.collect_data(
         ir_path, tf_policy_path, default_reward, moving_average_reward)
   """

@@ -44,7 +44,7 @@ def compile_fn(
       self, command_line: corpus.FullyQualifiedCmdLine, tf_policy_path: str,
       reward_only: bool,
       workdir: str) -> Dict[str, Tuple[tf.train.SequenceExample, float]]:
-    """Run inlining for the given IR file under the given policy.
+    """Run the compiler for the given IR file under the given policy.

     Args:
       command_line: the fully qualified command line.

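For context, the corrected docstring boils down to the usage pattern sketched below. This is a minimal sketch assuming the constructor and collect_data signatures exactly as shown in the docstring; the import path follows the file path above, and the concrete paths, decay rate, and initial rewards are hypothetical placeholders, not values from the repository.

# Minimal sketch of the docstring's usage pattern. All concrete values
# below (paths, decay rate, initial rewards) are hypothetical.
from compiler_opt.rl.regalloc.regalloc_runner import RegAllocRunner

clang_path = '/usr/local/bin/clang'          # hypothetical clang binary
launcher_path = '/usr/local/bin/launcher'    # hypothetical launcher
moving_average_decay_rate = 0.8              # hypothetical decay rate

runner = RegAllocRunner(
    clang_path, launcher_path, moving_average_decay_rate)

# Per the docstring, collect_data compiles the module under the given policy
# and returns the serialized trace plus reward bookkeeping as a 4-tuple.
default_reward, moving_average_reward = 0.0, 0.0  # hypothetical seeds
serialized_sequence_example, default_reward, moving_average_reward, \
    policy_reward = runner.collect_data(
        '/tmp/module.bc', '/tmp/policy', default_reward,
        moving_average_reward)
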
compiler_opt/rl/train_bc.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-r"""Train behavioral cloning policy for LLVM Inliner decision rule."""
+r"""Train behavioral cloning policy."""

 import os

@@ -54,7 +54,7 @@ def train_eval(agent_config_type=agent_config.BCAgentConfig,
                num_iterations=100,
                batch_size=64,
                train_sequence_length=1):
-  """Train for LLVM inliner."""
+  """Train Behavioral Cloning."""
   root_dir = os.path.expanduser(_ROOT_DIR.value)
   root_dir = os.path.normpath(root_dir)
   problem_config = registry.get_configuration()

compiler_opt/rl/train_locally.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-r"""Train and Eval LLVM Inliner decision rule with local_data_collector."""
+r"""Train and Eval with local_data_collector."""

 import collections
 import functools

@@ -72,7 +72,7 @@ def train_eval(worker_manager_class=LocalWorkerPoolManager,
                use_random_network_distillation=False,
                dump_best_trajectory=False,
                moving_average_decay_rate=1):
-  """Train for LLVM inliner."""
+  """Training coordinator."""
   root_dir = FLAGS.root_dir
   problem_config = registry.get_configuration()
   time_step_spec, action_spec = problem_config.get_signature_spec()

compiler_opt/tools/generate_vocab.py

Lines changed: 6 additions & 7 deletions
@@ -156,15 +156,14 @@ def main(_) -> None:
       logging.info('Found valid sequence_features dict: %s', sequence_features)
       break
     except IndexError:
-      # modules with no inlining done have empty feature values and
-      # raise an IndexError.
-      # continue until an inlined module with non-empty feature values is found.
-      logging.warn('Found module that was not inlined and has empty feature '
-                   'values.')
+      # modules with no results have empty feature values and
+      # raise an IndexError. For example, in the inliner case, maybe there were
+      # no inlining opportunities (very small modules)
+      # continue until a module with non-empty feature values is found.
+      logging.warn('Found module with empty feature values.')
       continue
   if not sequence_features:
-    raise ValueError('No inlined module with non-empty sequence_features '
-                     'values found.')
+    raise ValueError('No module with non-empty sequence_features values found.')

   parser_fn = create_tfrecord_parser_fn(sequence_features)
   dataset = dataset.map(parser_fn, num_parallel_calls=tf.data.AUTOTUNE)

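The rewritten comments above describe a scan loop: walk the raw records until one yields a non-empty sequence_features dict, treating an IndexError as "this module recorded no decisions". A minimal, self-contained sketch of that pattern follows; raw_dataset and extract_sequence_features are hypothetical stand-ins for the script's actual TFRecord dataset and parsing logic, not names from generate_vocab.py.

# Sketch of the scan pattern; `raw_dataset` and `extract_sequence_features`
# are hypothetical stand-ins, not names from generate_vocab.py.
import logging


def find_sequence_features(raw_dataset, extract_sequence_features):
  """Returns the first non-empty sequence_features dict found in raw_dataset."""
  sequence_features = {}
  for raw_record in raw_dataset:
    try:
      sequence_features = extract_sequence_features(raw_record)
      logging.info('Found valid sequence_features dict: %s', sequence_features)
      break
    except IndexError:
      # Empty feature values, e.g. a very small module with no decisions made.
      logging.warning('Found module with empty feature values.')
      continue
  if not sequence_features:
    raise ValueError(
        'No module with non-empty sequence_features values found.')
  return sequence_features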