 from huggingface_hub.utils import validate_hf_hub_args
 from typing_extensions import Self

+from .. import __version__
 from ..quantizers import DiffusersAutoQuantizer
 from ..utils import deprecate, is_accelerate_available, logging
 from .single_file_utils import (
@@ -260,6 +261,11 @@ def from_single_file(cls, pretrained_model_link_or_path_or_dict: Optional[str] =
         device = kwargs.pop("device", None)
         disable_mmap = kwargs.pop("disable_mmap", False)

+        user_agent = {"diffusers": __version__, "file_type": "single_file", "framework": "pytorch"}
+        # In order to ensure popular quantization methods are supported; can be disabled with `disable_telemetry`.
+        if quantization_config is not None:
+            user_agent["quant"] = quantization_config.quant_method.value
+
         if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):
             torch_dtype = torch.float32
             logger.warning(
@@ -278,6 +284,7 @@ def from_single_file(cls, pretrained_model_link_or_path_or_dict: Optional[str] =
             local_files_only=local_files_only,
             revision=revision,
             disable_mmap=disable_mmap,
+            user_agent=user_agent,
         )
         if quantization_config is not None:
             hf_quantizer = DiffusersAutoQuantizer.from_config(quantization_config)
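
For context, here is a minimal, standalone sketch of the user-agent payload this change assembles before the checkpoint is fetched. `BitsAndBytesConfig` is used purely as an illustrative `quantization_config`; any config whose `quant_method` is an enum member would be reported the same way, and the exact printed version string will vary with your install.

```python
# Sketch of the payload built in this diff (not the library call itself).
# Assumes diffusers is installed; BitsAndBytesConfig is only an illustrative quantization_config.
from diffusers import BitsAndBytesConfig, __version__

quantization_config = BitsAndBytesConfig(load_in_4bit=True)

# Base entries are always sent; the quantization method is appended only when a
# quantization_config is supplied, mirroring the block added to from_single_file.
user_agent = {"diffusers": __version__, "file_type": "single_file", "framework": "pytorch"}
if quantization_config is not None:
    user_agent["quant"] = quantization_config.quant_method.value

print(user_agent)
# e.g. {'diffusers': '<version>', 'file_type': 'single_file', 'framework': 'pytorch', 'quant': 'bitsandbytes'}
```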