Skip to content

Commit 926700d

Browse files
authored
Prepare for pytorch tensor impl change in is_contiguous_custom (#9402)
1 parent 10ed554 commit 926700d

File tree

2 files changed

+12
-2
lines changed

2 files changed

+12
-2
lines changed

torch_xla/csrc/tensor_impl.cpp

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,6 +178,13 @@ bool XLATensorImpl::is_contiguous_custom(at::MemoryFormat memory_format) const {
178178
return true;
179179
}
180180

181+
c10::SymBool XLATensorImpl::sym_is_contiguous_custom(
182+
at::MemoryFormat memory_format) const {
183+
// Storage is always contiguous, but the tensor metadata is_contiguous_ might
184+
// be false due to the update in the functionalization layer.
185+
return true;
186+
}
187+
181188
void XLATensorImpl::SetupSizeProperties() {
182189
size_t generation = tensor_->generation();
183190
if (generation != generation_) {

torch_xla/csrc/tensor_impl.h

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,10 @@ class XLATensorImpl : public c10::TensorImpl {
5151

5252
int64_t numel_custom() const override;
5353

54-
bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
54+
// TODO add override once https://github.com/pytorch/pytorch/pull/155590 lands
55+
// and remove is_contiguous_custom.
56+
bool is_contiguous_custom(at::MemoryFormat memory_format) const;
57+
c10::SymBool sym_is_contiguous_custom(at::MemoryFormat memory_format) const;
5558

5659
const at::Storage& storage() const override;
5760

@@ -72,4 +75,4 @@ class XLATensorImpl : public c10::TensorImpl {
7275

7376
} // namespace torch_xla
7477

75-
#endif // XLA_TORCH_XLA_CSRC_TENSOR_IMPL_H_
78+
#endif // XLA_TORCH_XLA_CSRC_TENSOR_IMPL_H_

0 commit comments

Comments (0)