Skip to content

Commit a06141f

Browse files
Lucaskabela and zou3519 authored
Delete deprecated fp32 precision warnings (pytorch#166956) (pytorch#166998)
The deprecation warning led to warning spamming in PyTorch APIs, like torch.compile. This is not how a deprecation warning should go: if we add a deprecation warning, we'd better update our built-in APIs to prevent warning spam. Pull Request resolved: pytorch#166956 Approved by: https://github.com/albanD (cherry picked from commit 527b110) Co-authored-by: Richard Zou <[email protected]>
1 parent 5b9f040 commit a06141f

File tree

1 file changed

+0
-14
lines changed

1 file changed

+0
-14
lines changed

aten/src/ATen/Context.cpp

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@ C10_DIAGNOSTIC_POP()
2424
namespace at {
2525

2626
namespace {
27-
2827
/*
2928
These const variables defined the fp32 precisions for different backend
3029
We have "generic", "cuda", "mkldnn" backend now and we can choose fp32
@@ -76,14 +75,6 @@ void check_fp32_prec_backend_and_op(
7675
return valid;
7776
}
7877

79-
C10_ALWAYS_INLINE void warn_deprecated_fp32_precision_api(){
80-
TORCH_WARN_ONCE(
81-
"Please use the new API settings to control TF32 behavior, such as torch.backends.cudnn.conv.fp32_precision = 'tf32' "
82-
"or torch.backends.cuda.matmul.fp32_precision = 'ieee'. Old settings, e.g, torch.backends.cuda.matmul.allow_tf32 = True, "
83-
"torch.backends.cudnn.allow_tf32 = True, allowTF32CuDNN() and allowTF32CuBLAS() will be deprecated after Pytorch 2.9. Please see "
84-
"https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices"
85-
);
86-
}
8778
} // namespace
8879

8980
Context::Context() = default;
@@ -193,15 +184,13 @@ bool Context::allowTF32CuDNN(const std::string& op) const {
193184
} else {
194185
return float32Precision("cuda", op) == "tf32";
195186
}
196-
warn_deprecated_fp32_precision_api();
197187
return allow_tf32_cudnn;
198188
}
199189

200190
void Context::setAllowTF32CuDNN(bool b) {
201191
setFloat32Precision("cuda", "rnn", b ? "tf32" : "none");
202192
setFloat32Precision("cuda", "conv", b ? "tf32" : "none");
203193
allow_tf32_cudnn = b;
204-
warn_deprecated_fp32_precision_api();
205194
}
206195

207196
void Context::setSDPPriorityOrder(const std::vector<int64_t>& order) {
@@ -357,7 +346,6 @@ bool Context::allowTF32CuBLAS() const {
357346
"Current status indicate that you have used mix of the legacy and new APIs to set the TF32 status for cublas matmul. ",
358347
"We suggest only using the new API to set the TF32 flag. See also: ",
359348
"https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices");
360-
warn_deprecated_fp32_precision_api();
361349
return allow_tf32_new;
362350
}
363351

@@ -389,7 +377,6 @@ Float32MatmulPrecision Context::float32MatmulPrecision() const {
389377
"Current status indicate that you have used mix of the legacy and new APIs to set the matmul precision. ",
390378
"We suggest only using the new API for matmul precision. See also: ",
391379
"https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices");
392-
warn_deprecated_fp32_precision_api();
393380
return float32_matmul_precision;
394381
}
395382

@@ -406,7 +393,6 @@ std::string Context::float32Precision(const std::string& backend, const std::str
406393

407394
void Context::setFloat32MatmulPrecision(const std::string &s) {
408395
auto match = [this](const std::string & s_) {
409-
warn_deprecated_fp32_precision_api();
410396
// TODO: consider if CuDNN field needs to also be set for potential future CuDNN ops like multi-headed attention
411397
if (s_ == "highest") {
412398
float32_matmul_precision = at::Float32MatmulPrecision::HIGHEST;

0 commit comments

Comments (0)