[bitsandbytes] follow-ups #9730
Changes from 11 commits: 14a44e5, 065700c, 9c3a952, e39544a, 23fdc7a, cb94414, 1fa9d7f, 4c7ea4f, 6dc8936, 3dbe41f, 8a99701, 1af10a8, 3298a04
```diff
@@ -211,21 +211,28 @@ def load_model_dict_into_meta(
             set_module_kwargs["dtype"] = dtype

         # bnb params are flattened.
-        if not is_quant_method_bnb and empty_state_dict[param_name].shape != param.shape:
-            model_name_or_path_str = f"{model_name_or_path} " if model_name_or_path is not None else ""
-            raise ValueError(
-                f"Cannot load {model_name_or_path_str}because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example."
-            )
+        if empty_state_dict[param_name].shape != param.shape:
+            if (
+                is_quant_method_bnb
+                and hf_quantizer.pre_quantized
+                and hf_quantizer.check_if_quantized_param(model, param, param_name, state_dict, param_device=device)
+            ):
+                hf_quantizer.check_quantized_param_shape(param_name, empty_state_dict[param_name].shape, param.shape)
+            elif not is_quant_method_bnb:
```
Review comment (on the hunk above): Could have done with …
```diff
+                model_name_or_path_str = f"{model_name_or_path} " if model_name_or_path is not None else ""
+                raise ValueError(
+                    f"Cannot load {model_name_or_path_str} because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example."
+                )
```
```diff
-        if not is_quantized or (
-            not hf_quantizer.check_quantized_param(model, param, param_name, state_dict, param_device=device)
+        if is_quantized and (
+            hf_quantizer.check_if_quantized_param(model, param, param_name, state_dict, param_device=device)
         ):
-            if accepts_dtype:
-                set_module_tensor_to_device(model, param_name, device, value=param, **set_module_kwargs)
-            else:
-                set_module_tensor_to_device(model, param_name, device, value=param)
+            hf_quantizer.create_quantized_param(model, param, param_name, device, state_dict, unexpected_keys)
         else:
-            hf_quantizer.create_quantized_param(model, param, param_name, device, state_dict, unexpected_keys)
+            if accepts_dtype:
+                set_module_tensor_to_device(model, param_name, device, value=param, **set_module_kwargs)
+            else:
+                set_module_tensor_to_device(model, param_name, device, value=param)

     return unexpected_keys
```
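For context on why the shape guard needs a bnb branch: bitsandbytes serializes 4-bit weights packed two values per byte and flattened to a column vector, so a pre-quantized checkpoint legitimately stores a different shape than the meta model expects. Below is a minimal sketch of the kind of validation `check_quantized_param_shape` has to do; the function name mirrors the diff, but the body and the packing rule are assumptions about bnb 4-bit behavior, not code copied from this PR.

```python
import math


def check_quantized_param_shape_sketch(param_name, expected_shape, loaded_shape):
    """Sketch: validate a bnb-4bit packed tensor against the unquantized shape.

    expected_shape: shape of the parameter on the meta model, e.g. (4096, 4096)
    loaded_shape:   shape found in the pre-quantized checkpoint
    """
    n_elements = math.prod(expected_shape)
    # Assumption: bnb 4-bit packs two 4-bit values per uint8 byte and stores
    # the result as a flattened column vector.
    packed_shape = ((n_elements + 1) // 2, 1)
    if tuple(loaded_shape) != packed_shape:
        raise ValueError(
            f"{param_name}: expected packed 4-bit shape {packed_shape}, got {tuple(loaded_shape)}"
        )
    return True


# e.g. a (4096, 4096) weight quantized to 4 bit occupies 8,388,608 bytes
assert check_quantized_param_shape_sketch("proj.weight", (4096, 4096), (8388608, 1))
```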
```diff
@@ -134,7 +134,7 @@ def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str,
         """adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization"""
         return max_memory

-    def check_quantized_param(
+    def check_if_quantized_param(
         self,
         model: "ModelMixin",
         param_value: "torch.Tensor",
```
```diff
@@ -152,11 +152,21 @@ def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
         """
         takes needed components from state_dict and creates quantized param.
         """
-        if not hasattr(self, "check_quantized_param"):
+        if not hasattr(self, "check_if_quantized_param"):
             raise AttributeError(
                 f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
             )

+    def check_quantized_param_shape(self, *args, **kwargs):
+        """
+        checks if the quantized param has expected shape.
+        """
+        if not hasattr(self, "check_quantized_param_shape"):
```
Suggested change:
```diff
-        if not hasattr(self, "check_quantized_param_shape"):
+        if not hasattr(self, "check_if_quantized_param"):
```
Review comment: Why?
Review comment: The method is checking for itself? That can't be the case, right? Since the subclass inherits it from the base class, the attribute will already be there.
Reply: My bad. Have updated in 3298a04.
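The reviewer's point, as a tiny runnable illustration: a `hasattr` guard inside a method that tests for that method's own name can never fail, because every instance (including subclass instances) inherits the method being executed.

```python
class Base:
    def check_quantized_param_shape(self, *args, **kwargs):
        # This guard is unreachable: the attribute being tested is the very
        # method we are currently executing, which every subclass inherits.
        if not hasattr(self, "check_quantized_param_shape"):
            raise AttributeError("never raised")
        return True


class Sub(Base):
    pass


assert Sub().check_quantized_param_shape()  # always True, guard never fires
```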
Reply: To unify the content between the 8-bit and 4-bit hfoption blocks.
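For reference, loading a model with a bitsandbytes config is what exercises the renamed `check_if_quantized_param` / `create_quantized_param` hooks in the loading loop patched above. A hedged usage sketch with the `BitsAndBytesConfig` that diffusers exposes; the model id and parameter choices are illustrative, not taken from this PR:

```python
import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

# Quantize on the fly to 4-bit NF4 while loading.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# Passing a quantization config routes each parameter through
# hf_quantizer.check_if_quantized_param() and create_quantized_param().
model = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # illustrative model id
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)
```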