Skip to content

Commit 9676cb8

Browse files
feat(mm): wip port main models to new api
1 parent 7765df4 commit 9676cb8

File tree

7 files changed

+500
-201
lines changed

7 files changed

+500
-201
lines changed

invokeai/app/api/routers/model_manager.py

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,7 @@
2929
)
3030
from invokeai.app.util.suppress_output import SuppressOutput
3131
from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelType
32-
from invokeai.backend.model_manager.config import (
33-
AnyModelConfig,
34-
MainCheckpointConfig,
35-
)
32+
from invokeai.backend.model_manager.config import AnyModelConfig, SD_1_2_XL_XLRefiner_CheckpointConfig
3633
from invokeai.backend.model_manager.load.model_cache.cache_stats import CacheStats
3734
from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
3835
from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataWithFiles, UnknownMetadataException
@@ -741,9 +738,10 @@ async def convert_model(
741738
logger.error(str(e))
742739
raise HTTPException(status_code=424, detail=str(e))
743740

744-
if not isinstance(model_config, MainCheckpointConfig):
745-
logger.error(f"The model with key {key} is not a main checkpoint model.")
746-
raise HTTPException(400, f"The model with key {key} is not a main checkpoint model.")
741+
if isinstance(model_config, SD_1_2_XL_XLRefiner_CheckpointConfig):
742+
msg = f"The model with key {key} is not a main SD 1/2/XL checkpoint model."
743+
logger.error(msg)
744+
raise HTTPException(400, msg)
747745

748746
with TemporaryDirectory(dir=ApiDependencies.invoker.services.configuration.models_path) as tmpdir:
749747
convert_path = pathlib.Path(tmpdir) / pathlib.Path(model_config.path).stem

invokeai/app/services/model_install/model_install_default.py

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@
4141
InvalidModelConfigException,
4242
ModelConfigFactory,
4343
)
44-
from invokeai.backend.model_manager.legacy_probe import ModelProbe
4544
from invokeai.backend.model_manager.metadata import (
4645
AnyModelRepoMetadata,
4746
HuggingFaceMetadataFetch,
@@ -601,22 +600,11 @@ def _probe(self, model_path: Path, config: Optional[ModelRecordChanges] = None):
601600
hash_algo = self._app_config.hashing_algorithm
602601
fields = config.model_dump()
603602

604-
# WARNING!
605-
# The legacy probe relies on the implicit order of tests to determine model classification.
606-
# This can lead to regressions between the legacy and new probes.
607-
# Do NOT change the order of `probe` and `classify` without implementing one of the following fixes:
608-
# Short-term fix: `classify` tests `matches` in the same order as the legacy probe.
609-
# Long-term fix: Improve `matches` to be more specific so that only one config matches
610-
# any given model - eliminating ambiguity and removing reliance on order.
611-
# After implementing either of these fixes, remove @pytest.mark.xfail from `test_regression_against_model_probe`
612-
try:
613-
return ModelProbe.probe(model_path=model_path, fields=deepcopy(fields), hash_algo=hash_algo) # type: ignore
614-
except InvalidModelConfigException:
615-
return ModelConfigFactory.from_model_on_disk(
616-
mod=model_path,
617-
overrides=deepcopy(fields),
618-
hash_algo=hash_algo,
619-
)
603+
return ModelConfigFactory.from_model_on_disk(
604+
mod=model_path,
605+
overrides=deepcopy(fields),
606+
hash_algo=hash_algo,
607+
)
620608

621609
def _register(
622610
self, model_path: Path, config: Optional[ModelRecordChanges] = None, info: Optional[AnyModelConfig] = None

0 commit comments

Comments (0)