1 parent 0a9dcdd commit 768ab84
torch/_inductor/codegen/triton.py
@@ -1003,14 +1003,14 @@ def relu(x):
     @staticmethod
     def minimum(a, b):
         if torch.version.hip:
-            return f"tl.minimum({a}, {b})"
+            return f"tl.minimum({a}, {b}, tl.PropagateNan.ALL)"
         else:
             return f"triton_helpers.minimum({a}, {b})"

     @staticmethod
     def maximum(a, b):
         if torch.version.hip:
-            return f"tl.maximum({a}, {b})"
+            return f"tl.maximum({a}, {b}, tl.PropagateNan.ALL)"
         else:
             return f"triton_helpers.maximum({a}, {b})"
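Context for the change (a sketch, not part of the commit): Triton's tl.minimum/tl.maximum do not propagate NaN by default, while eager PyTorch's torch.minimum/torch.maximum do, so the HIP code path passes tl.PropagateNan.ALL to keep the generated kernels consistent with eager semantics (the non-HIP path reaches the same behavior through triton_helpers). A minimal Python illustration of the eager behavior being matched; the tensor values are chosen only for demonstration:

# Not part of the commit: shows that eager PyTorch propagates NaN through
# minimum/maximum, which is the semantics the generated Triton code must match.
import torch

a = torch.tensor([1.0, float("nan"), 3.0])
b = torch.tensor([2.0, 2.0, float("nan")])

print(torch.minimum(a, b))  # tensor([1., nan, nan]) -- NaN wins elementwise
print(torch.maximum(a, b))  # tensor([2., nan, nan])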