Skip to content

Commit fa2bb34

Browse files
xunsonghtye1gujinghui
authored
Fix security scan for release owned by Xunsong (#3545)
Include all files under the `tests/gpu/` and `scripts` directories. Signed-off-by: Xunsong, Huang <[email protected]> Co-authored-by: Ye Ting <[email protected]> Co-authored-by: Jinghui <[email protected]>
1 parent 88de66f commit fa2bb34

21 files changed

+88
-16330
lines changed

scripts/gpu/gen_code.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -724,8 +724,6 @@ def extract_schema(path):
724724
for line in open(path, "r"):
725725
line = line.strip()
726726
m = re.match(r"\s*([^\s].*);\s*//\s*([^\s].*)", line)
727-
if not m:
728-
continue
729727
if m:
730728
schema = m.group(2)
731729
if schema.startswith("{"):
@@ -754,7 +752,6 @@ def get_lazy_reorder(schema_name, argument):
754752
LAZY_REORDER_OPTIONAL_TENSOR.substitute(change_dict),
755753
change_dict["temp_name"],
756754
)
757-
option["actuals"][i] = change_dict["temp_name"]
758755
elif not argument["type"].startswith("const"):
759756
return LAZY_REORDER_TENSOR.substitute(change_dict), change_dict["name"]
760757
else:

scripts/tools/setup/cmake.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -124,12 +124,14 @@ def _get_cmake_command():
124124
# if IS_WINDOWS:
125125
# return cmake_command
126126
# cmake3 = 'cmake3' # which('cmake3')
127-
cmake3 = None
127+
# cmake3 = None
128128
cmake = "cmake" # which('cmake')
129-
if cmake3 is not None and CMake._get_version(cmake3) >= LooseVersion("3.5.0"):
130-
cmake_command = "cmake3"
131-
return cmake_command
132-
elif cmake is not None and CMake._get_version(cmake) >= LooseVersion("3.5.0"):
129+
# Security Scan: waste code as cmake3 is always None here.
130+
# if cmake3 is not None and CMake._get_version(cmake3) >= LooseVersion("3.5.0"):
131+
# cmake_command = "cmake3"
132+
# return cmake_command
133+
# elif cmake is not None and CMake._get_version(cmake) >= LooseVersion("3.5.0"):
134+
if cmake is not None and CMake._get_version(cmake) >= LooseVersion("3.5.0"):
133135
return cmake_command
134136
else:
135137
raise RuntimeError("no cmake or cmake3 with version >= 3.5.0 found")

scripts/tools/torchgen/executorch/model.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -145,12 +145,15 @@ def has_kernels(self, g: Union[NativeFunction, NativeFunctionsGroup]) -> bool:
145145
def get_kernels(
146146
self, g: Union[NativeFunction, NativeFunctionsGroup]
147147
) -> Dict[ETKernelKey, BackendMetadata]:
148+
f = None
148149
if isinstance(g, NativeFunction):
149150
f = g
150151
elif isinstance(g, NativeFunctionsGroup):
151152
f = g.functional
152153
else:
153154
assert_never(g)
155+
assert f is not None, \
156+
f"NoneTypeError: Cannot get funcion from {type(g).__name__}"
154157
if f.func.name not in self.index:
155158
return {}
156159
return self.index[f.func.name]

scripts/tools/torchgen/gen_functionalization_type.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -782,4 +782,3 @@ def gen_functionalization_definition(
782782
if g.mutable is not None:
783783
mutation_defs.append(emit_inplace_functionalization_body(g.mutable, g))
784784
return mutation_defs
785-
return []

scripts/tools/torchgen/model.py

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -966,14 +966,15 @@ def __post_init__(self) -> None:
966966

967967
@property
968968
def has_composite_kernel(self) -> bool:
969+
# as scanned, self.has_composite_implicit_autograd_kernel here must be false
969970
return (
970-
self.has_composite_implicit_autograd_kernel
971-
or self.has_composite_explicit_autograd_kernel
972-
or self.has_composite_explicit_autograd_non_functional_kernel
973-
) or (
974-
self.has_composite_implicit_autograd_kernel
975-
and self.has_composite_implicit_autograd_nested_tensor_kernel
976-
)
971+
# self.has_composite_implicit_autograd_kernel or
972+
self.has_composite_explicit_autograd_kernel or
973+
self.has_composite_explicit_autograd_non_functional_kernel
974+
) # or (
975+
# self.has_composite_implicit_autograd_kernel
976+
# and self.has_composite_implicit_autograd_nested_tensor_kernel
977+
# )
977978

978979
@property
979980
def is_view_op(self) -> bool:
@@ -1236,12 +1237,15 @@ def has_kernel(self, g: Union[NativeFunction, NativeFunctionsGroup]) -> bool:
12361237
def get_kernel(
12371238
self, g: Union[NativeFunction, NativeFunctionsGroup]
12381239
) -> Optional[BackendMetadata]:
1240+
f = None
12391241
if isinstance(g, NativeFunction):
12401242
f = g
12411243
elif isinstance(g, NativeFunctionsGroup):
12421244
f = self.primary(g)
12431245
else:
12441246
assert_never(g)
1247+
assert f is not None, \
1248+
f"NoneTypeError: Cannot get funcion from {type(g).__name__}"
12451249
if f.func.name not in self.index:
12461250
return None
12471251
return self.index[f.func.name]

scripts/tools/torchgen/packaged/autograd/gen_inplace_or_view_type.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -276,7 +276,10 @@ def unpack_args(f: NativeFunction) -> Tuple[List[str], List[Binding]]:
276276

277277
is_tensor_list = is_tensor_list_type(binding.argument.type)
278278
ref = (not is_nullable) and not is_tensor_list
279-
suffix = "_opt" if is_nullable and not is_tensor_list else ""
279+
# Security Scan: if is_nullable is True, loop will continue at #L275
280+
# which makes below line dead
281+
# suffix = "_opt" if is_nullable and not is_tensor_list else ""
282+
suffix = ""
280283
body.append(
281284
UNPACK_TENSOR.substitute(
282285
arg_name=binding.name,

tests/gpu/examples/test_margin_ranking_loss.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,6 @@ def _test_cpu(input1, input2, target, reduc):
3535
print(input1.grad)
3636
print(input2.grad)
3737
return input1, input2
38-
input1.grad.zero_()
39-
input2.grad.zero_()
4038

4139
def _test_dpcpp(input1, input2, target, reduc):
4240
loss = nn.MarginRankingLoss(reduction=reduc)

tests/gpu/examples/test_qconv_channels_last.py

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -15,12 +15,11 @@ def test_qconv_simple_channels_last(self, dtype=torch.float):
1515
dtype_filters = torch.qint8
1616
zp_vec = [128, 2, 0]
1717
for with_relu in [True, False]:
18+
qconv_fn = torch.ops.quantized.conv2d
19+
if with_relu:
20+
qconv_fn = torch.ops.quantized.conv2d_relu
1821
for scale_in in [1.2, 1.6]:
1922
for zero_point_in in zp_vec: # torch u8, random zp, 0
20-
if with_relu:
21-
qconv_fn = torch.ops.quantized.conv2d_relu
22-
else:
23-
qconf_fn = torch.ops.quantized.conv2d
2423
scale_weight = 0.5
2524
scale_out = 0.5
2625
zero_point_out = 2
@@ -83,12 +82,11 @@ def test_qconv_simple_channels_last_3d(self, dtype=torch.float):
8382
dtype_filters = torch.qint8
8483
zp_vec = [128, 2, 0]
8584
for with_relu in [True, False]:
85+
qconv_fn = torch.ops.quantized.conv3d
86+
if with_relu:
87+
qconv_fn = torch.ops.quantized.conv3d_relu
8688
for scale_in in [1.2, 1.6]:
8789
for zero_point_in in zp_vec: # torch u8, random zp, 0
88-
if with_relu:
89-
qconv_fn = torch.ops.quantized.conv3d_relu
90-
else:
91-
qconf_fn = torch.ops.quantized.conv3d
9290
scale_weight = 0.5
9391
scale_out = 4.0
9492
zero_point_out = 2

tests/gpu/examples/test_qdeconv.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -67,10 +67,7 @@ def test_qdeconv3d_cpuref(self, dtype=torch.float):
6767

6868
X_gpu = X.to("xpu")
6969
W_gpu = W.to("xpu")
70-
if bias is not None:
71-
bias_gpu = bias.to("xpu")
72-
else:
73-
bias_gpu = None
70+
bias_gpu = None
7471

7572
# We do the s8 quantize in backend, the formula is qx = x / sc + 0
7673
qX_gpu = torch.quantize_per_tensor(

tests/gpu/pytorch/test/functorch/test_aotdispatch.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -168,17 +168,18 @@ def test_make_fx_no_decompose(self, device):
168168
# FIXME
169169
return self.skipTest("error: maximum recursion reached")
170170

171-
def f(x):
172-
return torch.tanh(x).sum()
171+
# Security Scan: dead code due to skipTest(), need to reopen in future
172+
# def f(x):
173+
# return torch.tanh(x).sum()
173174

174-
fx_f = make_fx(grad(f))(torch.randn(5))
175-
ops = {i.target for i in fx_f.graph.nodes}
175+
# fx_f = make_fx(grad(f))(torch.randn(5))
176+
# ops = {i.target for i in fx_f.graph.nodes}
176177

177-
self.assertEqual(torch.ops.aten.tanh_backward in ops, True)
178+
# self.assertEqual(torch.ops.aten.tanh_backward in ops, True)
178179

179-
fx_f = make_fx(grad(f), decomposition_table)(torch.randn(5))
180-
ops = {i.target for i in fx_f.graph.nodes}
181-
self.assertEqual(torch.ops.aten.tanh_backward in ops, False)
180+
# fx_f = make_fx(grad(f), decomposition_table)(torch.randn(5))
181+
# ops = {i.target for i in fx_f.graph.nodes}
182+
# self.assertEqual(torch.ops.aten.tanh_backward in ops, False)
182183

183184
def test_nnc_jit(self, device):
184185
def f(x):

0 commit comments

Comments
 (0)