diff --git a/invokeai/backend/model_manager/load/load_default.py b/invokeai/backend/model_manager/load/load_default.py
index 84f42e12243..3fb7a574f31 100644
--- a/invokeai/backend/model_manager/load/load_default.py
+++ b/invokeai/backend/model_manager/load/load_default.py
@@ -75,7 +75,6 @@ def _load_and_cache(self, config: AnyModelConfig, submodel_type: Optional[SubMod
         config.path = str(self._get_model_path(config))

         self._ram_cache.make_room(self.get_size_fs(config, Path(config.path), submodel_type))
-        self._logger.info(f"Loading model '{stats_name}' into RAM cache..., config={config}")
         loaded_model = self._load_model(config, submodel_type)

         self._ram_cache.put(
diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
index 4c05911019d..0e11cd4191d 100644
--- a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
+++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
@@ -140,7 +140,6 @@ def _load_from_singlefile(
         # Some weights of the model checkpoint were not used when initializing CLIPTextModelWithProjection:
         # ['text_model.embeddings.position_ids']

-        self._logger.info(f"Loading model from single file at {config.path} using {load_class.__name__}")
         with SilenceWarnings():
             pipeline = load_class.from_single_file(config.path, torch_dtype=self._torch_dtype)
