Skip to content

Commit be2a083

Browse files
authored
Fix dequant_dtype handling (#334)
* Fix dequant_dtype handling
* Simplify dequant dtype selection expression
1 parent 6b778af commit be2a083

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

ops.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -153,7 +153,7 @@ def ggml_save_to_state_dict(self, destination, prefix, keep_vars):
153153
# Take into account space required for dequantizing the largest tensor
154154
if self.largest_layer:
155155
shape = getattr(self.weight, "tensor_shape", self.weight.shape)
156-
dtype = (self.dequant_dtype and self.dequant_dtype != "target") or torch.float16
156+
dtype = self.dequant_dtype if self.dequant_dtype and self.dequant_dtype != "target" else torch.float16
157157
temp = torch.empty(*shape, device=torch.device("meta"), dtype=dtype)
158158
destination[prefix + "temp.weight"] = temp
159159

0 commit comments

Comments (0)