We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 189aa88 commit 369b80e (Copy full SHA for 369b80e)
aten/src/ATen/cuda/CUDABlas.cpp
@@ -1595,6 +1595,7 @@ void scaled_gemm(
1595
#else
1596
TORCH_CHECK(false, "scaled_gemm with `torch.float` outer vector scaling is only supported for CUDA 12.9 and above");
1597
#endif // if CUDA_VERSION >= 12090
1598
+ }
1599
1600
size_t workspaceSize = _getWorkspaceSize();
1601
auto workspace = at::empty(static_cast<int64_t>(workspaceSize), at::TensorOptions().dtype(at::kByte).device(at::kCUDA));
0 commit comments