5 files changed: +11 −13 lines changed
@@ -29,12 +29,12 @@ models:
2929 default_chat_model :
3030 api_key : ${GRAPHRAG_API_KEY}
3131 type : openai_chat
32- model : gpt-4o
32+ model : gpt-4.1
3333 model_supports_json : true
3434 default_embedding_model :
3535 api_key : ${GRAPHRAG_API_KEY}
3636 type : openai_embedding
37- model : text-embedding-ada-002
37+ model : text-embedding-3-large
38 38 ```
3939
4040#### Fields
Original file line number Diff line number Diff line change 4747DEFAULT_OUTPUT_BASE_DIR = "output"
4848DEFAULT_CHAT_MODEL_ID = "default_chat_model"
49 49 DEFAULT_CHAT_MODEL_TYPE = ModelType.OpenAIChat
50- DEFAULT_CHAT_MODEL = "gpt-4o "
50+ DEFAULT_CHAT_MODEL = "gpt-4.1 "
5151DEFAULT_CHAT_MODEL_AUTH_TYPE = AuthType .APIKey
5252DEFAULT_EMBEDDING_MODEL_ID = "default_embedding_model"
53 53 DEFAULT_EMBEDDING_MODEL_TYPE = ModelType.OpenAIEmbedding
54- DEFAULT_EMBEDDING_MODEL = "text-embedding-ada-002 "
54+ DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large "
5555DEFAULT_EMBEDDING_MODEL_AUTH_TYPE = AuthType .APIKey
5656DEFAULT_VECTOR_STORE_ID = "default_vector_store"
5757
@@ -309,8 +309,6 @@ class LanguageModelDefaults:
309309 proxy : None = None
310310 audience : None = None
311311 model_supports_json : None = None
312- tokens_per_minute : Literal ["auto" ] = "auto"
313- requests_per_minute : Literal ["auto" ] = "auto"
314312 rate_limit_strategy : str | None = "static"
315313 retry_strategy : str = "native"
316314 max_retries : int = 10
Original file line number Diff line number Diff line change 3232 model_supports_json: true # recommended if this is available for your model.
3333 concurrent_requests: { language_model_defaults .concurrent_requests } # max number of simultaneous LLM requests allowed
3434 async_mode: { language_model_defaults .async_mode .value } # or asyncio
35- retry_strategy: native
35+ retry_strategy: exponential_backoff
3636 max_retries: { language_model_defaults .max_retries }
37- tokens_per_minute: { language_model_defaults . tokens_per_minute } # set to null to disable rate limiting
38- requests_per_minute: { language_model_defaults . requests_per_minute } # set to null to disable rate limiting
37+ tokens_per_minute: null # set to null to disable rate limiting
38+ requests_per_minute: null # set to null to disable rate limiting
3939 { defs .DEFAULT_EMBEDDING_MODEL_ID } :
4040 type: { defs .DEFAULT_EMBEDDING_MODEL_TYPE .value } # or azure_openai_embedding
4141 # api_base: https://<instance>.openai.azure.com
5050 model_supports_json: true # recommended if this is available for your model.
5151 concurrent_requests: { language_model_defaults .concurrent_requests } # max number of simultaneous LLM requests allowed
5252 async_mode: { language_model_defaults .async_mode .value } # or asyncio
53- retry_strategy: native
53+ retry_strategy: exponential_backoff
5454 max_retries: { language_model_defaults .max_retries }
5555 tokens_per_minute: null # set to null to disable rate limiting or auto for dynamic
5656 requests_per_minute: null # set to null to disable rate limiting or auto for dynamic
Original file line number Diff line number Diff line change 77
8 8 from pydantic import BaseModel, Field, model_validator
99
10- DEFAULT_VECTOR_SIZE : int = 1536
10+ DEFAULT_VECTOR_SIZE : int = 3072
1111
1212VALID_IDENTIFIER_REGEX = re .compile (r"^[A-Za-z_][A-Za-z0-9_]*$" )
1313
Original file line number Diff line number Diff line change @@ -30,7 +30,7 @@ def __init__(
3030 self .responses = config .responses if config and config .responses else responses
3131 self .response_index = 0
3232 self .config = config or LanguageModelConfig (
33- type = ModelType .MockChat , model = "gpt-4o " , api_key = "mock"
33+ type = ModelType .MockChat , model = "gpt-4.1 " , api_key = "mock"
3434 )
3535
3636 async def achat (
@@ -99,7 +99,7 @@ class MockEmbeddingLLM:
9999
100100 def __init__ (self , ** kwargs : Any ):
101101 self .config = LanguageModelConfig (
102- type = ModelType .MockEmbedding , model = "text-embedding-ada-002 " , api_key = "mock"
102+ type = ModelType .MockEmbedding , model = "text-embedding-3-large " , api_key = "mock"
103103 )
104104
105105 def embed_batch (self , text_list : list [str ], ** kwargs : Any ) -> list [list [float ]]:
You can’t perform that action at this time.
0 commit comments