
Commit 70a095a

metascroy authored and facebook-github-bot committed
pyre-fix (pytorch#14241)
Summary: Forward fix for pyre errors in D82242003

Differential Revision: D82265586
1 parent: b23f883
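
Background on the suppressions added here: a trailing # pyre-ignore[N] comment silences Pyre error code N on that single line only (16 is the undefined-attribute error, 6 the incompatible-parameter-type error). The snippet below is a hypothetical illustration of the mechanism, not code from this commit.

# Hypothetical illustration only; not from this commit or repository.
class Config:
    name: str = "default"

cfg = Config()
print(cfg.group_size)  # pyre-ignore[16]: group_size is not declared on Config; error 16 is suppressed on this line only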

2 files changed: 3 additions, 3 deletions

backends/xnnpack/test/ops/test_linear.py

Lines changed: 1 addition & 1 deletion
@@ -395,7 +395,7 @@ def _test_groupwise_dq_linear(
         quantize_(
             mod,
             Int8DynamicActivationIntxWeightConfig(
-                weight_dtype=torch.int4, weight_granularity=PerGroup(group_size)
+                weight_dtype=torch.int4, weight_granularity=PerGroup(group_size)  # pyre-ignore[16]
             ),
         )
         unwrap_tensor_subclass(mod)

examples/models/llama/source_transformation/quantize.py

Lines changed: 2 additions & 2 deletions
@@ -135,7 +135,7 @@ def quantize( # noqa C901
                     PerAxis(0) if group_size == 0 else PerGroup(group_size)
                 ),
                 weight_mapping_type=MappingType.SYMMETRIC,
-                intx_packing_format="opaque_torchao_auto",
+                intx_packing_format="opaque_torchao_auto",  # pyre-ignore[6]
             ),
         )
         model = unwrap_tensor_subclass(model)
@@ -157,7 +157,7 @@ def quantize( # noqa C901
         quantize_(
             model,
             Int8DynamicActivationIntxWeightConfig(
-                weight_dtype=torch.int4,
+                weight_dtype=torch.int4,  # pyre-ignore[16]
                 weight_granularity=PerGroup(group_size),
             ),
         )
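
For context, here is a minimal standalone sketch of the quantize_ call pattern these diffs touch. The import paths below are assumptions based on current torchao layout and may differ across torchao releases; this is not code from this commit.

# Minimal sketch, not from this commit; import paths are assumptions
# and may vary across torchao releases.
import torch
import torch.nn as nn

from torchao.quantization import Int8DynamicActivationIntxWeightConfig, quantize_
from torchao.quantization.granularity import PerGroup
from torchao.utils import unwrap_tensor_subclass

model = nn.Sequential(nn.Linear(64, 64))

# Dynamically quantize activations to int8 and weights to int4,
# with weight quantization parameters computed per group of 32 elements.
quantize_(
    model,
    Int8DynamicActivationIntxWeightConfig(
        weight_dtype=torch.int4,
        weight_granularity=PerGroup(32),
    ),
)

# Flatten tensor subclasses so the module can be exported/lowered.
model = unwrap_tensor_subclass(model)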
