@@ -9,6 +9,7 @@
 #include <torch/csrc/lazy/core/tensor_util.h>
 #include <torch/csrc/lazy/core/util.h>
 
+#include "absl/log/absl_check.h"
 #include "torch_xla/csrc/aten_xla_bridge.h"
 #include "torch_xla/csrc/device.h"
 #include "torch_xla/csrc/ir_builder.h"
@@ -71,16 +72,11 @@ XLATensorImpl::XLATensorImpl(XLATensor&& tensor)
                     GetTypeMeta(tensor),
                     bridge::XlaDeviceToAtenDevice(tensor.GetDevice())),
       tensor_(c10::make_intrusive<XLATensor>(std::move(tensor))) {
-  // Update the Autocast key based off the backend device.
-  // Upstream TensorImpl cannot differentiate between XLA:TPU and XLA:GPU
-  // so we must manually update Autocast to AutocastCUDA on XLA:GPU.
-  torch::lazy::BackendDevice current_device = bridge::GetCurrentDevice();
-  auto dev_type = static_cast<XlaDeviceType>(current_device.type());
-  if (dev_type == XlaDeviceType::CUDA) {
-    auto autocast_cuda_ks = c10::DispatchKeySet(c10::DispatchKey::AutocastCUDA);
-    auto autocast_xla_ks = c10::DispatchKeySet(c10::DispatchKey::AutocastXLA);
-    key_set_ = (key_set_ - autocast_xla_ks) | autocast_cuda_ks;
-  }
+  auto dev_type = static_cast<XlaDeviceType>(bridge::GetCurrentDevice().type());
+  ABSL_CHECK(dev_type != XlaDeviceType::CUDA)
+      << "XLA:CUDA is not supported anymore. "
+         "If you are seeing this error, report a bug to the PyTorch/XLA GitHub "
+         "repository: https://github.com/pytorch/xla";
   const_cast<XLATensorImpl*>(this)->SetupSizeProperties();
   set_sizes_and_strides(sym_sizes_, c10::fromIntArrayRefSlow(
                             sizes_and_strides_.strides_arrayref()));