This repository was archived by the owner on Apr 28, 2023. It is now read-only.

Commit a5807e8

Merge pull request #83 from facebookresearch/tiny-improvement
Small improvement to parameters init for autotuner
2 parents 253f7d9 + 627280b commit a5807e8

2 files changed: 10 additions, 30 deletions


tensor_comprehensions/tc_unit.py

Lines changed: 8 additions & 27 deletions
@@ -159,39 +159,20 @@ def get_options_from_kwargs_and_tuner_cache(name, cache_file, options_cache, *in
 # TC autotuner class - ATen
 ###############################################################################
 class TcAutotuner(object):
-    def __init__(
-        self,
-        tc_lang,
-        pop_size=10,
-        crossover_rate=80,
-        mutation_rate=7,
-        generations=2,
-        number_elites=1,
-        threads=8,
-        gpus="0",
-        proto="/tmp/tuner.txt",
-        restore_from_proto=False,
-        restore_number=10,
-        log_generations=False,
-        tuner_min_launch_total_threads=64,
-        **kwargs
-    ):
+    def __init__(self, tc_lang, **kwargs):
         # tuner_cache will look like:
         # hash_key -> {"forward": options1, "backward": options2}
         self.tuner_cache = {}
         self.kwargs = kwargs
         self.tc_lang = tc_lang
         self.autotuner = ATenAutotuner(tc_lang)
-        self.set_autotuner_settings(
-            pop_size, crossover_rate, mutation_rate, generations, number_elites,
-            threads, gpus, proto, restore_from_proto, restore_number,
-            log_generations, tuner_min_launch_total_threads
-        )
+        self.set_autotuner_parameters(**kwargs)

-    def set_autotuner_settings(
-        self, pop_size, crossover_rate, mutation_rate, generations, number_elites,
-        threads, gpus, proto, restore_from_proto, restore_number, log_generations,
-        tuner_min_launch_total_threads,
+    def set_autotuner_parameters(
+        self, pop_size=10, crossover_rate=80, mutation_rate=7, generations=2,
+        number_elites=1, threads=8, gpus="0", proto="/tmp/tuner.txt",
+        restore_from_proto=False, restore_number=10, log_generations=False,
+        tuner_min_launch_total_threads=64, **kwargs
     ):
         self.autotuner.pop_size(pop_size)
         self.autotuner.crossover_rate(crossover_rate)
@@ -602,7 +583,7 @@ def autotune(self, *inputs, **kwargs):
         else:
             # we do the init again so that the autotuner parameters are updated
             # properly if users change them
-            self.tuner.__init__(self.lang, **kwargs)
+            self.tuner.set_autotuner_parameters(**kwargs)
         return self.tuner.autotune(*inputs, **kwargs)

 ###############################################################################
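
Net effect of the hunks above: the tuning parameters move out of the __init__ signature and into set_autotuner_parameters, which now owns the defaults, so an existing tuner can have its parameters changed without re-running __init__. A minimal sketch of the resulting call pattern, assuming TcAutotuner is importable from tensor_comprehensions.tc_unit (the TC definition string below is a placeholder):

# Sketch only: assumes the tensor_comprehensions package is installed and
# that TcAutotuner is importable from tensor_comprehensions.tc_unit.
from tensor_comprehensions.tc_unit import TcAutotuner

# Placeholder TC definition (the standard matmul example from the TC docs).
MATMUL_LANG = """
def matmul(float(M,K) A, float(K,N) B) -> (C) {
    C(m,n) +=! A(m,kk) * B(kk,n)
}
"""

# __init__ now just forwards keyword arguments to set_autotuner_parameters;
# anything unspecified falls back to the defaults declared there.
tuner = TcAutotuner(MATMUL_LANG, generations=5, pop_size=20)

# Parameters can later be updated in place, without rebuilding the tuner.
tuner.set_autotuner_parameters(generations=10, threads=16)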

test_python/test_tc_torch.py

Lines changed: 2 additions & 3 deletions
@@ -13,7 +13,7 @@
 # limitations under the License.
 ##############################################################################

-import os, unittest, time, pdb
+import os, unittest, time

 import torch
 import torch.cuda
@@ -52,7 +52,6 @@ def convolution_grad(float(N,C,H,W) I, float(M,C,KH,KW) W1, float(N,M,H,W) O_gra
 }}
 """

-# PATH_PREFIX = os.path.join(os.path.dirname(os.path.abspath(__file__)), "tc_test")
 PATH_PREFIX = os.path.join("/tmp/", "tc_test")

 if not os.path.exists(PATH_PREFIX):
@@ -333,7 +332,7 @@ def test_conv_train_autotune_no_cache_no_options_seed(self):
         I, W = torch.randn(N, C, H, W).cuda(), torch.randn(O, C, kH, kW).cuda()
         convolution.autotune(I, W, **tc.autotuner_settings)
         # on the second call, autotuning will be seeded from previous best options
-        convolution.autotune(I, W, **tc.autotuner_settings)
+        convolution.autotune(I, W, **tc.autotuner_settings, generations=5, pop_size=20)

     def test_conv_train_autotune_cache_no_options_seed(self):
         lang = CONV_TRAIN
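
The last hunk drives the new code path end to end: because autotune now calls set_autotuner_parameters(**kwargs) on the existing tuner instead of re-running __init__, per-call keyword overrides take effect while the tuner keeps its state and is still seeded from the previous best options. A rough user-level sketch of that pattern; CONV_TRAIN and tc.autotuner_settings come from this test file, while the tc.define(...) call and the concrete tensor shapes are assumptions for illustration:

# Rough sketch only. CONV_TRAIN and tc.autotuner_settings exist in the test
# file; the tc.define(...) call and concrete shapes below are assumptions.
import torch
import tensor_comprehensions as tc

convolution = tc.define(
    CONV_TRAIN, training=True, name="convolution", backward="convolution_grad"
)
N, C, H, W, O, kH, kW = 32, 4, 56, 56, 16, 1, 1
I = torch.randn(N, C, H, W).cuda()
W1 = torch.randn(O, C, kH, kW).cuda()

# First call: tune from scratch with the stock settings.
convolution.autotune(I, W1, **tc.autotuner_settings)

# Second call: seeded from the previous best options; generations/pop_size
# flow through **kwargs into set_autotuner_parameters. The overrides must
# not duplicate keys already present in tc.autotuner_settings, or Python
# rejects the repeated keyword arguments with a TypeError.
convolution.autotune(I, W1, **tc.autotuner_settings, generations=5, pop_size=20)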
