diff --git a/invokeai/backend/model_manager/load/model_loaders/cogview4.py b/invokeai/backend/model_manager/load/model_loaders/cogview4.py
index 782ff38450c..ee8c6d4f41d 100644
--- a/invokeai/backend/model_manager/load/model_loaders/cogview4.py
+++ b/invokeai/backend/model_manager/load/model_loaders/cogview4.py
@@ -45,12 +45,13 @@ def _load_model(
                 model_path,
                 torch_dtype=dtype,
                 variant=variant,
+                local_files_only=True,
             )
         except OSError as e:
             if variant and "no file named" in str(
                 e
             ):  # try without the variant, just in case user's preferences changed
-                result = load_class.from_pretrained(model_path, torch_dtype=dtype)
+                result = load_class.from_pretrained(model_path, torch_dtype=dtype, local_files_only=True)
             else:
                 raise e
diff --git a/invokeai/backend/model_manager/load/model_loaders/flux.py b/invokeai/backend/model_manager/load/model_loaders/flux.py
index e44ddec382c..4e732187a34 100644
--- a/invokeai/backend/model_manager/load/model_loaders/flux.py
+++ b/invokeai/backend/model_manager/load/model_loaders/flux.py
@@ -122,9 +122,9 @@ def _load_model(
         match submodel_type:
             case SubModelType.Tokenizer:
-                return CLIPTokenizer.from_pretrained(Path(config.path) / "tokenizer")
+                return CLIPTokenizer.from_pretrained(Path(config.path) / "tokenizer", local_files_only=True)
             case SubModelType.TextEncoder:
-                return CLIPTextModel.from_pretrained(Path(config.path) / "text_encoder")
+                return CLIPTextModel.from_pretrained(Path(config.path) / "text_encoder", local_files_only=True)
 
         raise ValueError(
             f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
@@ -148,10 +148,12 @@ def _load_model(
         )
         match submodel_type:
             case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
-                return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
+                return T5TokenizerFast.from_pretrained(
+                    Path(config.path) / "tokenizer_2", max_length=512, local_files_only=True
+                )
             case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                 te2_model_path = Path(config.path) / "text_encoder_2"
-                model_config = AutoConfig.from_pretrained(te2_model_path)
+                model_config = AutoConfig.from_pretrained(te2_model_path, local_files_only=True)
                 with accelerate.init_empty_weights():
                     model = AutoModelForTextEncoding.from_config(model_config)
                     model = quantize_model_llm_int8(model, modules_to_not_convert=set())
@@ -192,10 +194,15 @@ def _load_model(
         match submodel_type:
             case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
-                return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
+                return T5TokenizerFast.from_pretrained(
+                    Path(config.path) / "tokenizer_2", max_length=512, local_files_only=True
+                )
             case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                 return T5EncoderModel.from_pretrained(
-                    Path(config.path) / "text_encoder_2", torch_dtype="auto", low_cpu_mem_usage=True
+                    Path(config.path) / "text_encoder_2",
+                    torch_dtype="auto",
+                    low_cpu_mem_usage=True,
+                    local_files_only=True,
                 )
 
         raise ValueError(
diff --git a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py
index b888c69edf9..2a79f604ba2 100644
--- a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py
+++ b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py
@@ -37,12 +37,14 @@ def _load_model(
         repo_variant = config.repo_variant if isinstance(config, Diffusers_Config_Base) else None
         variant = repo_variant.value if repo_variant else None
         try:
-            result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant)
+            result: AnyModel = model_class.from_pretrained(
+                model_path, torch_dtype=self._torch_dtype, variant=variant, local_files_only=True
+            )
         except OSError as e:
             if variant and "no file named" in str(
                 e
             ):  # try without the variant, just in case user's preferences changed
-                result = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype)
+                result = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, local_files_only=True)
             else:
                 raise e
         return result
diff --git a/invokeai/backend/model_manager/load/model_loaders/onnx.py b/invokeai/backend/model_manager/load/model_loaders/onnx.py
index a565bb11d05..6ffab997cf3 100644
--- a/invokeai/backend/model_manager/load/model_loaders/onnx.py
+++ b/invokeai/backend/model_manager/load/model_loaders/onnx.py
@@ -38,5 +38,6 @@ def _load_model(
             model_path,
             torch_dtype=self._torch_dtype,
             variant=variant,
+            local_files_only=True,
         )
         return result
diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
index d0cc5893796..0e11cd4191d 100644
--- a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
+++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
@@ -80,12 +80,13 @@ def _load_model(
                 model_path,
                 torch_dtype=self._torch_dtype,
                 variant=variant,
+                local_files_only=True,
             )
         except OSError as e:
             if variant and "no file named" in str(
                 e
             ):  # try without the variant, just in case user's preferences changed
-                result = load_class.from_pretrained(model_path, torch_dtype=self._torch_dtype)
+                result = load_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, local_files_only=True)
             else:
                 raise e
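
Note: every from_pretrained call in the loaders above now passes local_files_only=True, so the model manager resolves weights, tokenizers, and configs strictly from the already-installed model directory and never falls back to downloading from the Hugging Face Hub. A minimal sketch of the behavior this flag relies on, assuming a hypothetical model directory path (not one from this repo):

    from pathlib import Path

    from transformers import CLIPTokenizer

    # Hypothetical path to a model directory that was installed ahead of time.
    model_dir = Path("/data/invokeai/models/sd-1/example-model")

    try:
        # With local_files_only=True, transformers loads strictly from disk and
        # raises OSError if the files are absent, instead of reaching the Hub.
        tokenizer = CLIPTokenizer.from_pretrained(model_dir / "tokenizer", local_files_only=True)
    except OSError as e:
        print(f"Tokenizer files not found locally: {e}")

This mirrors the OSError handling already present in the loaders: a missing variant (or missing files generally) surfaces immediately as OSError rather than triggering an implicit network fetch.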