|
15 | 15 |
|
16 | 16 |
|
def validate_config_names(parameters: GraphRagConfig) -> None:
    """Validate config file for model deployment name typos, by running a quick test message for each."""
    for model_id, config in parameters.models.items():
        # Dispatch on the configured model type; unknown types are skipped silently.
        if config.type in ["chat", "azure_openai", "openai"]:
            _validate_chat_model(model_id, config)
        elif config.type in ["embedding", "azure_openai_embedding", "openai_embedding"]:
            _validate_embedding_model(model_id, config)


def _validate_chat_model(model_id, config) -> None:
    """Register *config* as a chat model and send one test message; exit(1) on failure."""
    llm = ModelManager().register_chat(
        # Unique registration name per model id so multiple chat models in one
        # config don't collide in the manager's registry.
        name=f"test-llm-{model_id}",
        model_type=config.type,
        config=config,
        callbacks=NoopWorkflowCallbacks(),
        cache=None,
    )
    try:
        asyncio.run(llm.achat("This is an LLM connectivity test. Say Hello World"))
        logger.info("LLM Config Params Validated")
    except Exception as e:  # noqa: BLE001
        logger.error(f"LLM configuration error detected.\n{e}")  # noqa
        print(f"Failed to validate language model ({model_id}) params", e)  # noqa: T201
        sys.exit(1)


def _validate_embedding_model(model_id, config) -> None:
    """Register *config* as an embedding model and embed one test string; exit(1) on failure."""
    embed_llm = ModelManager().register_embedding(
        # Unique registration name per model id — mirrors _validate_chat_model.
        name=f"test-embed-llm-{model_id}",
        model_type=config.type,
        config=config,
        callbacks=NoopWorkflowCallbacks(),
        cache=None,
    )
    try:
        asyncio.run(embed_llm.aembed_batch(["This is an LLM Embedding Test String"]))
        logger.info("Embedding LLM Config Params Validated")
    except Exception as e:  # noqa: BLE001
        logger.error(f"Embedding configuration error detected.\n{e}")  # noqa
        print(f"Failed to validate embedding model ({model_id}) params", e)  # noqa: T201
        sys.exit(1)
0 commit comments