Skip to content

Commit e2c6834

Browse files
Revert "deprecate check_is_size and guard_size_oblivious (pytorch#167198)"
This reverts commit 50bf1f0. Reverted pytorch#167198 on behalf of https://github.com/pytorch-auto-revert due to Reverted automatically by pytorch's autorevert, to avoid this behaviour add the tag autorevert: disable ([comment](pytorch#167198 (comment)))
1 parent 0e7235e commit e2c6834

File tree

8 files changed

+66
-57
lines changed

8 files changed

+66
-57
lines changed

test/dynamo/test_higher_order_ops.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -727,7 +727,7 @@ def k(x):
727727
x = torch.randn(3)
728728
arg_count = ifdynstaticdefault(4, 5)
729729
# when compiled with dynamic, we don't have upper bound runtime assertions for u0
730-
expected_op_count = ifdynstaticdefault(9, 7)
730+
expected_op_count = ifdynstaticdefault(10, 8)
731731
out_graph = self._test_wrap_simple(
732732
f,
733733
default_args_generator((x,)),
@@ -747,6 +747,7 @@ def forward(self, s77: "Sym(s77)", L_x_: "f32[s77]"):
747747
c: "i64[u0, 1]" = l_x_.nonzero()
748748
749749
sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
750+
_check_is_size = torch._check_is_size(sym_size_int_1); _check_is_size = None
750751
751752
ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0
752753
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
@@ -783,6 +784,7 @@ def forward(self, L_x_: "f32[3]"):
783784
c: "i64[u0, 1]" = l_x_.nonzero()
784785
785786
sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
787+
_check_is_size = torch._check_is_size(sym_size_int_1); _check_is_size = None
786788
787789
ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0
788790
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
@@ -881,7 +883,7 @@ def k(x):
881883
x = torch.randn(3)
882884
arg_count = ifdynstaticdefault(4, 5)
883885
# when compiled with dynamic, we don't have upper bound runtime assertions for u0
884-
expected_op_count = ifdynstaticdefault(9, 7)
886+
expected_op_count = ifdynstaticdefault(10, 8)
885887
out_graph = self._test_wrap_simple(
886888
f,
887889
default_args_generator((x,)),
@@ -903,6 +905,7 @@ def forward(self, L_x_: "f32[3]"):
903905
c: "i64[u0, 1]" = l_x_.nonzero()
904906
905907
sym_size_int: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
908+
_check_is_size = torch._check_is_size(sym_size_int); _check_is_size = None
906909
907910
ge: "Sym(u0 >= 0)" = sym_size_int >= 0
908911
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
@@ -953,7 +956,7 @@ def k(x):
953956
y = torch.randn(3)
954957
arg_count = ifdynstaticdefault(5, 6)
955958
# when compiled with dynamic, we don't have upper bound runtime assertions for u0 and u1
956-
expected_op_count = ifdynstaticdefault(15, 11)
959+
expected_op_count = ifdynstaticdefault(17, 13)
957960
out_graph = self._test_wrap_simple(
958961
f,
959962
default_args_generator((x, y)),
@@ -974,6 +977,7 @@ def forward(self, L_x_: "f32[3]", L_y_: "f32[3]"):
974977
c: "i64[u0, 1]" = l_x_.nonzero()
975978
976979
sym_size_int_2: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
980+
_check_is_size = torch._check_is_size(sym_size_int_2); _check_is_size = None
977981
978982
ge: "Sym(u0 >= 0)" = sym_size_int_2 >= 0
979983
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
@@ -983,6 +987,7 @@ def forward(self, L_x_: "f32[3]", L_y_: "f32[3]"):
983987
d: "i64[u1, 1]" = l_y_.nonzero(); l_y_ = None
984988
985989
sym_size_int_3: "Sym(u1)" = torch.ops.aten.sym_size.int(d, 0)
990+
_check_is_size_1 = torch._check_is_size(sym_size_int_3); _check_is_size_1 = None
986991
987992
ge_1: "Sym(u1 >= 0)" = sym_size_int_3 >= 0
988993
_assert_scalar_default_2 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_default_2 = None

test/export/test_export.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3073,12 +3073,15 @@ def forward(self, x, y):
30733073
foo = torch.ops.export.foo.default(x, y); x = None
30743074
sym_size_int = torch.ops.aten.sym_size.int(foo, 0)
30753075
sym_size_int_1 = torch.ops.aten.sym_size.int(foo, 1)
3076+
sym_constrain_range_for_size_default = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int); sym_constrain_range_for_size_default = None
30763077
ge = sym_size_int >= 0; sym_size_int = None
30773078
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
3079+
sym_constrain_range_for_size_default_1 = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int_1); sym_constrain_range_for_size_default_1 = None
30783080
ge_1 = sym_size_int_1 >= 0; sym_size_int_1 = None
30793081
_assert_scalar_default_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_default_1 = None
30803082
bar = torch.ops.export.bar.default(y); y = None
30813083
sym_size_int_2 = torch.ops.aten.sym_size.int(bar, 0)
3084+
sym_constrain_range_for_size_default_2 = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int_2); sym_constrain_range_for_size_default_2 = None
30823085
ge_2 = sym_size_int_2 >= 0; sym_size_int_2 = None
30833086
_assert_scalar_default_2 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u2 >= 0 on node 'ge_2'"); ge_2 = _assert_scalar_default_2 = None
30843087
return (foo, bar)""",
@@ -17732,6 +17735,7 @@ def forward(self, x, mask):
1773217735
def forward(self, x, mask):
1773317736
masked_select = torch.ops.aten.masked_select.default(x, mask); x = mask = None
1773417737
sym_size_int_1 = torch.ops.aten.sym_size.int(masked_select, 0)
17738+
sym_constrain_range_for_size_default = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int_1); sym_constrain_range_for_size_default = None
1773517739
ge = sym_size_int_1 >= 0
1773617740
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
1773717741
le = sym_size_int_1 <= 1188864

test/inductor/test_auto_functionalize.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1492,8 +1492,8 @@ def forward(self, arg0_1: "Sym(s77)", arg1_1: "f32[s77][1]cpu"):
14921492
clone: "f32[s77][1]cpu" = torch.ops.aten.clone.default(arg1_1)
14931493
nonzero: "i64[u0, 1][1, u0]cpu" = torch.ops.aten.nonzero.default(clone); clone = None
14941494
sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(nonzero, 0)
1495-
ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0; sym_size_int_1 = None
1496-
_assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
1495+
ge_1: "Sym(u0 >= 0)" = sym_size_int_1 >= 0; sym_size_int_1 = None
1496+
_assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
14971497
_to_copy: "f32[u0, 1][1, u0]cpu" = torch.ops.aten._to_copy.default(nonzero, dtype = torch.float32); nonzero = None
14981498
auto_functionalized_v2 = torch.ops.higher_order.auto_functionalized_v2(torch.ops.mylib.foo.default, _x_base_index = 0, _x_alias = True, _y_base_index = 1, _y_alias = True, _all_bases = [arg1_1, _to_copy]); _to_copy = None
14991499
getitem_1: "f32[s77][1]cpu" = auto_functionalized_v2[1]
@@ -1513,8 +1513,8 @@ def forward(self, arg0_1: "f32[2][1]cpu"):
15131513
clone: "f32[2][1]cpu" = torch.ops.aten.clone.default(arg0_1)
15141514
nonzero: "i64[u0, 1][1, u0]cpu" = torch.ops.aten.nonzero.default(clone); clone = None
15151515
sym_size_int: "Sym(u0)" = torch.ops.aten.sym_size.int(nonzero, 0)
1516-
ge: "Sym(u0 >= 0)" = sym_size_int >= 0
1517-
_assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
1516+
ge_1: "Sym(u0 >= 0)" = sym_size_int >= 0
1517+
_assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
15181518
le: "Sym(u0 <= 2)" = sym_size_int <= 2; sym_size_int = None
15191519
_assert_scalar_1 = torch.ops.aten._assert_scalar.default(le, "Runtime assertion failed for expression u0 <= 2 on node 'le'"); le = _assert_scalar_1 = None
15201520
_to_copy: "f32[u0, 1][1, u0]cpu" = torch.ops.aten._to_copy.default(nonzero, dtype = torch.float32); nonzero = None
@@ -1538,8 +1538,8 @@ def forward(self, arg0_1: "f32[2][1]cpu"):
15381538
def forward(self, arg0_1: "Sym(s77)", arg1_1: "f32[s77][1]cpu"):
15391539
nonzero: "i64[u0, 1][1, u0]cpu" = torch.ops.aten.nonzero.default(arg1_1)
15401540
sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(nonzero, 0)
1541-
ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0; sym_size_int_1 = None
1542-
_assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
1541+
ge_1: "Sym(u0 >= 0)" = sym_size_int_1 >= 0; sym_size_int_1 = None
1542+
_assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
15431543
convert_element_type: "f32[u0, 1][1, u0]cpu" = torch.ops.prims.convert_element_type.default(nonzero, torch.float32); nonzero = None
15441544
alias_default: "f32[s77][1]cpu" = torch.ops.aten.alias.default(arg1_1)
15451545
alias_default_1: "f32[u0, 1][1, u0]cpu" = torch.ops.aten.alias.default(convert_element_type)
@@ -1557,8 +1557,8 @@ def forward(self, arg0_1: "Sym(s77)", arg1_1: "f32[s77][1]cpu"):
15571557
def forward(self, arg0_1: "f32[2][1]cpu"):
15581558
nonzero: "i64[u0, 1][1, u0]cpu" = torch.ops.aten.nonzero.default(arg0_1)
15591559
sym_size_int: "Sym(u0)" = torch.ops.aten.sym_size.int(nonzero, 0)
1560-
ge: "Sym(u0 >= 0)" = sym_size_int >= 0
1561-
_assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
1560+
ge_1: "Sym(u0 >= 0)" = sym_size_int >= 0
1561+
_assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
15621562
le: "Sym(u0 <= 2)" = sym_size_int <= 2; sym_size_int = None
15631563
_assert_scalar_1 = torch.ops.aten._assert_scalar.default(le, "Runtime assertion failed for expression u0 <= 2 on node 'le'"); le = _assert_scalar_1 = None
15641564
convert_element_type: "f32[u0, 1][1, u0]cpu" = torch.ops.prims.convert_element_type.default(nonzero, torch.float32); nonzero = None

0 commit comments

Comments (0)