@@ -24,7 +24,6 @@ C10_DIAGNOSTIC_POP()
 namespace at {

 namespace {
-
 /*
  These const variables defined the fp32 precisions for different backend
  We have "generic", "cuda", "mkldnn" backend now and we can choose fp32
@@ -76,14 +75,6 @@ void check_fp32_prec_backend_and_op(
   return valid;
 }

-C10_ALWAYS_INLINE void warn_deprecated_fp32_precision_api(){
-  TORCH_WARN_ONCE(
-      "Please use the new API settings to control TF32 behavior, such as torch.backends.cudnn.conv.fp32_precision = 'tf32' "
-      "or torch.backends.cuda.matmul.fp32_precision = 'ieee'. Old settings, e.g, torch.backends.cuda.matmul.allow_tf32 = True, "
-      "torch.backends.cudnn.allow_tf32 = True, allowTF32CuDNN() and allowTF32CuBLAS() will be deprecated after Pytorch 2.9. Please see "
-      "https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices"
-  );
-}
 } // namespace

 Context::Context() = default;
@@ -193,15 +184,13 @@ bool Context::allowTF32CuDNN(const std::string& op) const {
   } else {
     return float32Precision("cuda", op) == "tf32";
   }
-  warn_deprecated_fp32_precision_api();
   return allow_tf32_cudnn;
 }

 void Context::setAllowTF32CuDNN(bool b) {
   setFloat32Precision("cuda", "rnn", b ? "tf32" : "none");
   setFloat32Precision("cuda", "conv", b ? "tf32" : "none");
   allow_tf32_cudnn = b;
-  warn_deprecated_fp32_precision_api();
 }

 void Context::setSDPPriorityOrder(const std::vector<int64_t>& order) {
@@ -357,7 +346,6 @@ bool Context::allowTF32CuBLAS() const {
       "Current status indicate that you have used mix of the legacy and new APIs to set the TF32 status for cublas matmul. ",
       "We suggest only using the new API to set the TF32 flag. See also: ",
       "https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices");
-  warn_deprecated_fp32_precision_api();
   return allow_tf32_new;
 }

@@ -389,7 +377,6 @@ Float32MatmulPrecision Context::float32MatmulPrecision() const {
       "Current status indicate that you have used mix of the legacy and new APIs to set the matmul precision. ",
       "We suggest only using the new API for matmul precision. See also: ",
       "https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices");
-  warn_deprecated_fp32_precision_api();
   return float32_matmul_precision;
 }

@@ -406,7 +393,6 @@ std::string Context::float32Precision(const std::string& backend, const std::str

 void Context::setFloat32MatmulPrecision(const std::string &s) {
   auto match = [this](const std::string & s_) {
-    warn_deprecated_fp32_precision_api();
     // TODO: consider if CuDNN field needs to also be set for potential future CuDNN ops like multi-headed attention
     if (s_ == "highest") {
       float32_matmul_precision = at::Float32MatmulPrecision::HIGHEST;
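For context, the per-backend/per-op precision API that the removed warning pointed users to is the one exercised by setAllowTF32CuDNN() above. The sketch below shows how a caller might drive it directly from C++, mirroring the Python-level settings quoted in the deleted message (torch.backends.cudnn.conv.fp32_precision, torch.backends.cuda.matmul.fp32_precision). It is a minimal illustration against the Context methods visible in this diff; the header path and the "matmul" op name are assumptions, not confirmed by this change.

    // Hypothetical usage sketch; assumes <ATen/Context.h> exposes at::globalContext().
    #include <ATen/Context.h>

    int main() {
      auto& ctx = at::globalContext();

      // New-style fp32 precision controls, keyed by backend and op,
      // as used by setAllowTF32CuDNN() in this diff.
      ctx.setFloat32Precision("cuda", "conv", "tf32");
      ctx.setFloat32Precision("cuda", "rnn", "tf32");
      ctx.setFloat32Precision("cuda", "matmul", "ieee");  // "matmul" op name is an assumption

      // Query side, matching the float32Precision() call in allowTF32CuDNN().
      const bool conv_uses_tf32 = (ctx.float32Precision("cuda", "conv") == "tf32");
      (void)conv_uses_tf32;
      return 0;
    }

With the legacy allow_tf32 flags and the new per-op settings both writing into the same precision state, dropping the TORCH_WARN_ONCE call removes the deprecation nag while the consistency checks kept above still flag mixed use of the two APIs.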