Commit 84443bd

c00w authored and pytorchmergebot committed
feature_use: Remove JK from naming for feature use. (pytorch#143529)
See discussion in pytorch#142819, but TL;DR: since we're logging use but not direct JK reads, it's less confusing to drop the JK name from the logging.

Pull Request resolved: pytorch#143529
Approved by: https://github.com/ezyang
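For context, set_feature_use records a per-compile boolean signal saying whether a feature is in use. The sketch below illustrates the naming change this commit makes; the import path and the literal True/False arguments are assumptions for illustration (the diffs below show only the call sites), while the call signature and the feature names come straight from this commit.

# Sketch only: import path assumed; the diffs below show just the call sites.
from torch._dynamo.utils import set_feature_use

# Before this commit, call sites logged the full JK-style name, e.g.:
#   set_feature_use("pytorch/remote_cache:fx_graph_memcache_version", use_cache)
# After this commit, a short logging-oriented name is used instead:
set_feature_use("fx_cache", True)           # FX graph cache in use for this compile
set_feature_use("triton_bundling", False)   # Triton bundling not in use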
1 parent b8f3831 commit 84443bd

3 files changed (+5, -13 lines)

torch/_inductor/async_compile.py

Lines changed: 2 additions & 6 deletions
@@ -186,9 +186,7 @@ def triton(self, kernel_name: str, source_code: str, device_str: str = "cuda"):

         kernel = TritonCodeCache.load(kernel_name, source_code)
         if self.use_process_pool():
-            set_feature_use(
-                "pytorch/inductor:enable_parallel_compile_version (post_warmup)", True
-            )
+            set_feature_use("parallel_compile_post_warmup", True)
             # We want to support changing these env vars after (and while) the
             # process pool is running, so pass them to the subprocess to reset.
             env_vars = ["TORCHINDUCTOR_CACHE_DIR", "TRITON_CACHE_DIR"]
@@ -202,9 +200,7 @@ def triton(self, kernel_name: str, source_code: str, device_str: str = "cuda"):
                 ),
             )
         else:
-            set_feature_use(
-                "pytorch/inductor:enable_parallel_compile_version (post_warmup)", False
-            )
+            set_feature_use("parallel_compile_post_warmup", False)
             with dynamo_timed(
                 "async_compile.precompile",
                 log_pt2_compile_event=True,

torch/_inductor/compile_fx.py

Lines changed: 1 addition & 1 deletion
@@ -630,7 +630,7 @@ def _compile_fx_inner(
     )
     local = config.fx_graph_cache
     remote = fx_graph_remote_cache
-    set_feature_use("pytorch/remote_cache:fx_graph_memcache_version", use_cache)
+    set_feature_use("fx_cache", use_cache)

     # TODO: This is a hack purely to get some info to extract_tensor_metadata_for_cache_key,
     # figure out how to not have to modify example inputs

torch/_inductor/triton_bundler.py

Lines changed: 2 additions & 6 deletions
@@ -143,13 +143,9 @@ def collect(
         """
         if not TritonBundler.is_enabled():
             cls.end_compile()
-            set_feature_use(
-                "pytorch/remote_cache:bundle_triton_into_fx_graph_cache_v2", False
-            )
+            set_feature_use("triton_bundling", False)
             return [], None
-        set_feature_use(
-            "pytorch/remote_cache:bundle_triton_into_fx_graph_cache_v2", True
-        )
+        set_feature_use("triton_bundling", True)

         with dynamo_timed(key="TritonBundler.collect", log_pt2_compile_event=True):
             entries = cls._entries
