@@ -17,10 +17,10 @@
 import aiohttp.web
 import pytest
 
-from metagpt.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
-from metagpt.context import Context as MetagptContext
-from metagpt.llm import LLM
-from metagpt.logs import logger
+from metagpt.core.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
+from metagpt.core.context import Context as MetagptContext
+from metagpt.core.llm import LLM
+from metagpt.core.logs import logger
 from metagpt.utils.git_repository import GitRepository
 from tests.mock.mock_aiohttp import MockAioResponse
 from tests.mock.mock_curl_cffi import MockCurlCffiResponse
@@ -62,8 +62,8 @@ def pytest_runtest_makereport(item, call): |
 def llm_mock(rsp_cache, mocker, request):
     llm = MockLLM(allow_open_api_call=ALLOW_OPENAI_API_CALL)
     llm.rsp_cache = rsp_cache
-    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", llm.aask)
-    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask_batch", llm.aask_batch)
+    mocker.patch("metagpt.core.provider.base_llm.BaseLLM.aask", llm.aask)
+    mocker.patch("metagpt.core.provider.base_llm.BaseLLM.aask_batch", llm.aask_batch)
     mocker.patch("metagpt.provider.openai_api.OpenAILLM.aask_code", llm.aask_code)
     yield mocker
     if hasattr(request.node, "test_outcome") and request.node.test_outcome.passed:
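For context, a minimal sketch of how a test might consume the patched fixture above. The test name and prompt are hypothetical (not part of the diff); llm_mock, MockLLM, ALLOW_OPENAI_API_CALL, and the patched BaseLLM.aask come from the hunk itself, and the import path follows the new metagpt.core namespace introduced by this change.

# Hypothetical usage sketch, assuming pytest-asyncio is available:
# with llm_mock active, BaseLLM.aask is patched to MockLLM.aask, so the
# call below is served from rsp_cache (or a mocked response) instead of
# hitting the real OpenAI API, unless ALLOW_OPENAI_API_CALL is enabled.
import pytest

from metagpt.core.llm import LLM


@pytest.mark.asyncio
async def test_aask_uses_mock(llm_mock):  # hypothetical test name
    rsp = await LLM().aask("hello")  # routed through the patched aask
    assert isinstance(rsp, str)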