Skip to content

Commit 9f8f996

Browse files
fix(model-loaders): add local_files_only=True to prevent network requests (#8735)
1 parent 252dd5b commit 9f8f996

File tree

5 files changed

+22
-10
lines changed

5 files changed

+22
-10
lines changed

invokeai/backend/model_manager/load/model_loaders/cogview4.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,12 +45,13 @@ def _load_model(
4545
model_path,
4646
torch_dtype=dtype,
4747
variant=variant,
48+
local_files_only=True,
4849
)
4950
except OSError as e:
5051
if variant and "no file named" in str(
5152
e
5253
): # try without the variant, just in case user's preferences changed
53-
result = load_class.from_pretrained(model_path, torch_dtype=dtype)
54+
result = load_class.from_pretrained(model_path, torch_dtype=dtype, local_files_only=True)
5455
else:
5556
raise e
5657

invokeai/backend/model_manager/load/model_loaders/flux.py

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -122,9 +122,9 @@ def _load_model(
122122

123123
match submodel_type:
124124
case SubModelType.Tokenizer:
125-
return CLIPTokenizer.from_pretrained(Path(config.path) / "tokenizer")
125+
return CLIPTokenizer.from_pretrained(Path(config.path) / "tokenizer", local_files_only=True)
126126
case SubModelType.TextEncoder:
127-
return CLIPTextModel.from_pretrained(Path(config.path) / "text_encoder")
127+
return CLIPTextModel.from_pretrained(Path(config.path) / "text_encoder", local_files_only=True)
128128

129129
raise ValueError(
130130
f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
@@ -148,10 +148,12 @@ def _load_model(
148148
)
149149
match submodel_type:
150150
case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
151-
return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
151+
return T5TokenizerFast.from_pretrained(
152+
Path(config.path) / "tokenizer_2", max_length=512, local_files_only=True
153+
)
152154
case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
153155
te2_model_path = Path(config.path) / "text_encoder_2"
154-
model_config = AutoConfig.from_pretrained(te2_model_path)
156+
model_config = AutoConfig.from_pretrained(te2_model_path, local_files_only=True)
155157
with accelerate.init_empty_weights():
156158
model = AutoModelForTextEncoding.from_config(model_config)
157159
model = quantize_model_llm_int8(model, modules_to_not_convert=set())
@@ -192,10 +194,15 @@ def _load_model(
192194

193195
match submodel_type:
194196
case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
195-
return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
197+
return T5TokenizerFast.from_pretrained(
198+
Path(config.path) / "tokenizer_2", max_length=512, local_files_only=True
199+
)
196200
case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
197201
return T5EncoderModel.from_pretrained(
198-
Path(config.path) / "text_encoder_2", torch_dtype="auto", low_cpu_mem_usage=True
202+
Path(config.path) / "text_encoder_2",
203+
torch_dtype="auto",
204+
low_cpu_mem_usage=True,
205+
local_files_only=True,
199206
)
200207

201208
raise ValueError(

invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,12 +37,14 @@ def _load_model(
3737
repo_variant = config.repo_variant if isinstance(config, Diffusers_Config_Base) else None
3838
variant = repo_variant.value if repo_variant else None
3939
try:
40-
result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant)
40+
result: AnyModel = model_class.from_pretrained(
41+
model_path, torch_dtype=self._torch_dtype, variant=variant, local_files_only=True
42+
)
4143
except OSError as e:
4244
if variant and "no file named" in str(
4345
e
4446
): # try without the variant, just in case user's preferences changed
45-
result = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype)
47+
result = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, local_files_only=True)
4648
else:
4749
raise e
4850
return result

invokeai/backend/model_manager/load/model_loaders/onnx.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,5 +38,6 @@ def _load_model(
3838
model_path,
3939
torch_dtype=self._torch_dtype,
4040
variant=variant,
41+
local_files_only=True,
4142
)
4243
return result

invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,12 +80,13 @@ def _load_model(
8080
model_path,
8181
torch_dtype=self._torch_dtype,
8282
variant=variant,
83+
local_files_only=True,
8384
)
8485
except OSError as e:
8586
if variant and "no file named" in str(
8687
e
8788
): # try without the variant, just in case user's preferences changed
88-
result = load_class.from_pretrained(model_path, torch_dtype=self._torch_dtype)
89+
result = load_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, local_files_only=True)
8990
else:
9091
raise e
9192

0 commit comments

Comments (0)