Skip to content

Commit b376a2c

Browse files
zhuhaozhe and DiweiSun authored
Hz/fx fix (#3383)
* Update dependency_version.json * fix fx ut * Update dependency_version.json * Update dependency_version.json * Update dependency_version.json * fix fp8 test --------- Co-authored-by: DiweiSun <[email protected]>
1 parent 869cb14 commit b376a2c

File tree

3 files changed

+6
-6
lines changed

3 files changed

+6
-6
lines changed

dependency_version.json

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -7,13 +7,13 @@
77
"commit": "llvmorg-16.0.6"
88
},
99
"pytorch": {
10-
"version": "2.6.0.dev20241028+cpu"
10+
"version": "2.6.0.dev20241124+cpu"
1111
},
1212
"torchaudio": {
13-
"version": "2.5.0.dev20241028+cpu"
13+
"version": "2.5.0.dev20241121+cpu"
1414
},
1515
"torchvision": {
16-
"version": "0.20.0.dev20241028+cpu"
16+
"version": "0.20.0.dev20241121+cpu"
1717
},
1818
"torch-ccl": {
1919
"commit": "torch_ccl_dev_2.6",

tests/cpu/test_fp8_autocast.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -193,7 +193,7 @@ def forward(self, x):
193193
fp8_linear_with_calibration = MyModel()
194194
fp8_linear_with_calibration = prepare_fp8(fp8_linear_with_calibration)
195195
fp8_linear_with_calibration.load_state_dict(
196-
torch.load("fp8_linear_inference.pt")
196+
torch.load("fp8_linear_inference.pt", weights_only=False)
197197
)
198198
fp8_linear_with_calibration.eval()
199199

tests/cpu/test_fx_optimization.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -104,7 +104,7 @@ def test_concat_linear_hf_bert(self):
104104
config = AutoConfig.from_pretrained(loc + "/bert-base-config.json")
105105
model = AutoModelForCausalLM.from_config(config)
106106
model.eval()
107-
inputs = torch.load(loc + "/bert-inputs.pt")
107+
inputs = torch.load(loc + "/bert-inputs.pt", weights_only=False)
108108
gm = hf_symbolic_trace(model, input_names=list(inputs.keys()))
109109
ref_out = gm(**inputs)
110110
concat_gm = ipex.fx.concat_linear.concat_linear(copy.deepcopy(gm), inplace=True)
@@ -125,7 +125,7 @@ def test_automatically_apply_concat_linear_with_ipex_optimize(self):
125125
loc = os.path.dirname(os.path.abspath(__file__))
126126
config = AutoConfig.from_pretrained(loc + "/bert-base-config.json")
127127
base_model = AutoModelForCausalLM.from_config(config).eval()
128-
inputs = torch.load(loc + "/bert-inputs.pt")
128+
inputs = torch.load(loc + "/bert-inputs.pt", weights_only=False)
129129
for dtype in [torch.float, torch.bfloat16]:
130130
for inplace in [True, False]:
131131
model = copy.deepcopy(base_model)

0 commit comments

Comments (0)