2 files changed, +12 −2 lines

@@ -178,6 +178,13 @@ bool XLATensorImpl::is_contiguous_custom(at::MemoryFormat memory_format) const {
   return true;
 }
 
+c10::SymBool XLATensorImpl::sym_is_contiguous_custom(
+    at::MemoryFormat memory_format) const {
+  // Storage is always contiguous, but the tensor metadata is_contiguous_
+  // might be false due to the update in the functionalization layer.
+  return true;
+}
+
 void XLATensorImpl::SetupSizeProperties() {
   size_t generation = tensor_->generation();
   if (generation != generation_) {
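For context on the new return type: a minimal sketch, not part of this PR, assuming the current c10::SymBool API (the implicit bool constructor and guard_bool() both exist upstream; the helper name is made up). Returning a plain `true` here relies on SymBool's implicit conversion from bool, and a caller can collapse the result back to a concrete bool like this:

#include <c10/core/SymBool.h>

// Hypothetical helper (illustration only): force a possibly-symbolic
// contiguity answer down to a plain bool, installing a guard if the
// value is actually symbolic.
static bool force_concrete(const c10::SymBool& sym) {
  // guard_bool() resolves the SymBool, recording file/line for guard
  // provenance when the value comes from symbolic shape tracing.
  return sym.guard_bool(__FILE__, __LINE__);
}

// Usage sketch: a constant `true` round-trips through SymBool unchanged.
// bool concrete = force_concrete(c10::SymBool(true));  // -> true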
@@ -51,7 +51,10 @@ class XLATensorImpl : public c10::TensorImpl {
 
   int64_t numel_custom() const override;
 
-  bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
+  // TODO: add `override` once https://github.com/pytorch/pytorch/pull/155590 lands
+  // and remove is_contiguous_custom.
+  bool is_contiguous_custom(at::MemoryFormat memory_format) const;
+  c10::SymBool sym_is_contiguous_custom(at::MemoryFormat memory_format) const;
 
   const at::Storage& storage() const override;
@@ -72,4 +75,4 @@ class XLATensorImpl : public c10::TensorImpl {
 
 }  // namespace torch_xla
 
-#endif  // XLA_TORCH_XLA_CSRC_TENSOR_IMPL_H_
+#endif  // XLA_TORCH_XLA_CSRC_TENSOR_IMPL_H_
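A toy illustration (not torch_xla code) of the staging pattern behind the TODO above: declaring the derived method without `override` lets it compile before the base-class virtual from pytorch/pytorch#155590 exists, while still participating in virtual dispatch once it does:

#include <iostream>

struct Base {
  // Stand-in for c10::TensorImpl once the upstream virtual lands.
  virtual bool sym_hook() const { return false; }
  virtual ~Base() = default;
};

struct Derived : Base {
  // No `override` keyword: this compiles even against a Base that lacks
  // the virtual, and overrides it automatically once Base declares it.
  bool sym_hook() const { return true; }
};

int main() {
  Derived d;
  const Base& b = d;
  std::cout << b.sym_hook() << "\n";  // prints 1: dispatch hits Derived
}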