Skip to content

Commit eedd4f9

Browse files
Hanyu-Jin and tye1 authored
Fix bad use of null-like value and internal links. (#4171) (#4177)
* Correct wrong link address. * Fix bad use of none-like value in _quantize_utils.py. --------- Co-authored-by: Ye Ting <[email protected]>
1 parent 500c444 commit eedd4f9

File tree

5 files changed

+4
-19
lines changed

5 files changed

+4
-19
lines changed

intel_extension_for_pytorch/quantization/_quantize_utils.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -286,6 +286,7 @@ def _patched_module_call(self, *args, **kwargs):
286286

287287
hook_type = get_module_hook_type(parent_module, cur_module)
288288
if hook_type is HookType.OP_HOOKS:
289+
assert parent_module is not None
289290
parent_qstate: AutoQuantizationState = (
290291
parent_module._auto_quant_state
291292
) # type: ignore[union-attr, assignment]
@@ -674,6 +675,7 @@ def _patched_module_call(self, *args, **kwargs):
674675
hook_type = get_module_hook_type(parent_module, cur_module)
675676
if hook_type is HookType.OP_HOOKS:
676677
# before hooks
678+
assert parent_module is not None
677679
qstate: AutoQuantizationState = (
678680
parent_module._auto_quant_state
679681
) # type: ignore[union-attr, assignment]

scripts/tools/queryop/query_op.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,8 @@
66
"pytorch: https://github.com/pytorch/pytorch/tree/release/2.1"
77
88
if key == 'XPU':
9-
"Please set up your XPU environment: install pytorch-2.1 and ipex GPU",
10-
"pytorch: https://github.com/intel-innersource/frameworks.ai.pytorch.private-gpu.git"
11-
"ipex GPU: rebase/ipex-2.1 https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-gpu.git"
9+
"Please set up your XPU environment: install pytorch-2.1 and Intel® Extension for PyTorch* GPU"
10+
"Intel® Extension for PyTorch* GPU: https://github.com/intel/intel-extension-for-pytorch/tree/xpu-main"
1211
1312
usage example:
1413
if key == CPU or CUDA:

tests/cpu/test_weight_prepack.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -483,7 +483,6 @@ def test_conv3d_training(self):
483483
# TODO: add inference case.
484484

485485
def _test_conv_nc11_base(self, dim):
486-
# related issue: https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-cpu/pull/86.
487486
channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
488487
test_dtypes = [torch.float]
489488
if core.onednn_has_bf16_support():

tests/gpu/examples/test_inplace_binary_and_relu.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,6 @@
33
from torch.testing._internal.common_utils import TestCase
44
import intel_extension_for_pytorch # noqa
55

6-
"""
7-
Motivation: When running block format ResNet-XX with using inplaced binary add and relu,
8-
layer output will be flushed to shape zero. In this UT, it may throw a segmentfault error.
9-
This issue was fixed in commit: 193ef42,
10-
PR: https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-gpu/pull/668/commits
11-
"""
12-
13-
146
dpcpp_device = torch.device("xpu")
157
cpu_device = torch.device("cpu")
168

tests/gpu/examples/test_reshape.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,6 @@
66

77
class TestTorchMethod(TestCase):
88
def test_reshape_cat(self, dtype=torch.float):
9-
"""
10-
Issue: When run SSD-ResNet50 with block format, it raised a shape mismatch error in cat op.
11-
Root cause: There was no to_plain_if_needed() in reshape_alias.
12-
Without it, the size of tensor was changed by reshape but dims was kept unchanged.
13-
It led to the shape mismatch error in the next op cat.
14-
Fixed PR: https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-gpu/pull/709
15-
"""
169
with torch.xpu.onednn_layout():
1710
input1 = torch.randn([32, 3, 300, 300]).to("xpu")
1811
conv1 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1, bias=False).to(

0 commit comments

Comments (0)