Commit 329752d
Merge branch 'main' into fix/agenerate-wrong-exception
2 parents 26f6a69 + 54885b8


145 files changed: +2375 −1576 lines changed

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+{
+    "type": "major",
+    "description": "Rework API to accept callbacks."
+}
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+{
+    "type": "minor",
+    "description": "Add LLM Manager and Factory, to support provider registration"
+}
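The "LLM Manager and Factory" change above introduces provider registration; the only call this commit actually shows is ModelManager().register_chat(...) in the prompt_tune.py diff below. As a rough sketch of what a string-keyed provider factory of that kind typically looks like, assuming a hypothetical ModelFactory whose name and methods are not confirmed by this commit:

# Hypothetical sketch of provider registration; the ModelFactory name and
# its methods are assumptions, not API confirmed by this commit.
from typing import Any, Callable


class ModelFactory:
    """Registry mapping a model_type string to a provider constructor."""

    _chat_registry: dict[str, Callable[..., Any]] = {}

    @classmethod
    def register_chat(cls, model_type: str, creator: Callable[..., Any]) -> None:
        # Providers self-register under a string key, e.g. "openai_chat".
        cls._chat_registry[model_type] = creator

    @classmethod
    def create_chat(cls, model_type: str, **kwargs: Any) -> Any:
        if model_type not in cls._chat_registry:
            msg = f"Unknown chat model type: {model_type}"
            raise ValueError(msg)
        return cls._chat_registry[model_type](**kwargs)

A manager can then resolve default_llm_settings.type through such a registry instead of hard-coding providers, which is what makes external provider registration possible.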
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+{
+    "type": "patch",
+    "description": "Refactor config defaults from constants to type-safe, hierarchical dataclass."
+}
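This "patch" change is what lets the diff below read language_model_defaults.max_retries instead of the flat constant defs.LLM_MAX_RETRIES. A minimal sketch of the shape such a hierarchy might take; only the max_retries field is confirmed by this commit, and the other field and the values are assumptions:

# Minimal sketch of type-safe, hierarchical defaults. Only
# language_model_defaults.max_retries appears in this commit; everything
# else here is an assumed illustration.
from dataclasses import dataclass, field


@dataclass
class LanguageModelDefaults:
    max_retries: int = 10           # assumed default value
    request_timeout: float = 180.0  # assumed field


@dataclass
class GraphRagDefaults:
    language_model: LanguageModelDefaults = field(
        default_factory=LanguageModelDefaults
    )


language_model_defaults = GraphRagDefaults().language_model
print(language_model_defaults.max_retries)  # typed, discoverable access

Compared with module-level constants, the dataclass gives typed attribute access and lets the defaults hierarchy mirror the structure of the config model.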

graphrag/api/prompt_tune.py

Lines changed: 10 additions & 7 deletions
@@ -13,10 +13,10 @@
 
 from pydantic import PositiveInt, validate_call
 
-import graphrag.config.defaults as defs
 from graphrag.callbacks.noop_workflow_callbacks import NoopWorkflowCallbacks
+from graphrag.config.defaults import language_model_defaults
 from graphrag.config.models.graph_rag_config import GraphRagConfig
-from graphrag.index.llm.load_llm import load_llm
+from graphrag.language_model.manager import ModelManager
 from graphrag.logger.print_progress import PrintProgressLogger
 from graphrag.prompt_tune.defaults import MAX_TOKEN_COUNT, PROMPT_TUNING_MODEL_ID
 from graphrag.prompt_tune.generator.community_report_rating import (
@@ -102,13 +102,16 @@ async def generate_indexing_prompts(
     # if max_retries is not set, inject a dynamically assigned value based on the number of expected LLM calls
     # to be made or fallback to a default value in the worst case
     if default_llm_settings.max_retries == -1:
-        default_llm_settings.max_retries = min(len(doc_list), defs.LLM_MAX_RETRIES)
+        default_llm_settings.max_retries = min(
+            len(doc_list), language_model_defaults.max_retries
+        )
 
-    llm = load_llm(
-        "prompt_tuning",
-        default_llm_settings,
-        cache=None,
+    llm = ModelManager().register_chat(
+        name="prompt_tuning",
+        model_type=default_llm_settings.type,
+        config=default_llm_settings,
         callbacks=NoopWorkflowCallbacks(),
+        cache=None,
     )
 
     if not domain: