We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 132349a · commit 6889e5b (Copy full SHA for 6889e5b)
torch_xla/distributed/spmd/xla_sharded_tensor.py
@@ -323,4 +323,4 @@ def redistribute(self, device_mesh, placements, *, async_op: bool = False):
323
324
@classmethod
325
def __torch_function__(cls, func, types, args=(), kwargs=None):
326
- return super().__torch_function__(func, types, args, kwargs)
+ return super().__torch_function__(func, types, args, kwargs)
0 commit comments