diff --git a/compiler_opt/rl/local_data_collector.py b/compiler_opt/rl/local_data_collector.py
index 209f57f1..dee5e155 100644
--- a/compiler_opt/rl/local_data_collector.py
+++ b/compiler_opt/rl/local_data_collector.py
@@ -55,7 +55,7 @@ def __init__(
     # We remove this activity from the critical path by running it concurrently
     # with the training phase - i.e. whatever happens between successive data
     # collection calls. Subsequent runs will wait for these to finish.
-    self._reset_workers: concurrent.futures.Future = None
+    self._reset_workers: Optional[concurrent.futures.Future] = None
     self._current_work: List[Tuple[corpus.ModuleSpec, worker.WorkerFuture]] = []
     self._pool = concurrent.futures.ThreadPoolExecutor()
 
diff --git a/compiler_opt/rl/train_locally.py b/compiler_opt/rl/train_locally.py
index 149265bc..3dc0cad5 100644
--- a/compiler_opt/rl/train_locally.py
+++ b/compiler_opt/rl/train_locally.py
@@ -45,7 +45,7 @@
 flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
                     'Root directory for writing logs/summaries/checkpoints.')
 flags.DEFINE_string('data_path', None,
-                    'Path to CNS folder containing IR files.')
+                    'Path to directory containing the corpus.')
 flags.DEFINE_integer(
     'num_workers', None,
     'Number of parallel data collection workers. `None` for max available')
@@ -98,7 +98,7 @@ def train_eval(agent_name=constant.AgentName.PPO,
   }
   saver = policy_saver.PolicySaver(policy_dict=policy_dict)
 
-  logging.info('Loading module specs from corpus.')
+  logging.info('Loading module specs from corpus at %s.', FLAGS.data_path)
   module_specs = corpus.build_modulespecs_from_datapath(
       FLAGS.data_path, problem_config.flags_to_add(),
       problem_config.flags_to_delete())