 models:
   {defs.DEFAULT_CHAT_MODEL_ID}:
-    type: {defs.DEFAULT_CHAT_MODEL_TYPE.value} # or azure_openai_chat
-    # api_base: https://<instance>.openai.azure.com
-    # api_version: 2024-05-01-preview
+    type: {defs.DEFAULT_CHAT_MODEL_TYPE.value}
+    model_provider: {defs.DEFAULT_MODEL_PROVIDER}
     auth_type: {defs.DEFAULT_CHAT_MODEL_AUTH_TYPE.value} # or azure_managed_identity
-    api_key: ${{GRAPHRAG_API_KEY}} # set this in the generated .env file
-    # audience: "https://cognitiveservices.azure.com/.default"
-    # organization: <organization_id>
+    api_key: ${{GRAPHRAG_API_KEY}} # set this in the generated .env file, or remove if managed identity
     model: {defs.DEFAULT_CHAT_MODEL}
-    # deployment_name: <azure_model_deployment_name>
-    # encoding_model: {defs.ENCODING_MODEL} # automatically set by tiktoken if left undefined
+    # api_base: https://<instance>.openai.azure.com
+    # api_version: 2024-05-01-preview
     model_supports_json: true # recommended if this is available for your model.
-    concurrent_requests: {language_model_defaults.concurrent_requests} # max number of simultaneous LLM requests allowed
+    concurrent_requests: {language_model_defaults.concurrent_requests}
     async_mode: {language_model_defaults.async_mode.value} # or asyncio
-    retry_strategy: native
+    retry_strategy: {language_model_defaults.retry_strategy}
     max_retries: {language_model_defaults.max_retries}
-    tokens_per_minute: {language_model_defaults.tokens_per_minute} # set to null to disable rate limiting
-    requests_per_minute: {language_model_defaults.requests_per_minute} # set to null to disable rate limiting
+    tokens_per_minute: null
+    requests_per_minute: null
   {defs.DEFAULT_EMBEDDING_MODEL_ID}:
-    type: {defs.DEFAULT_EMBEDDING_MODEL_TYPE.value} # or azure_openai_embedding
-    # api_base: https://<instance>.openai.azure.com
-    # api_version: 2024-05-01-preview
-    auth_type: {defs.DEFAULT_EMBEDDING_MODEL_AUTH_TYPE.value} # or azure_managed_identity
+    type: {defs.DEFAULT_EMBEDDING_MODEL_TYPE.value}
+    model_provider: {defs.DEFAULT_MODEL_PROVIDER}
+    auth_type: {defs.DEFAULT_EMBEDDING_MODEL_AUTH_TYPE.value}
     api_key: ${{GRAPHRAG_API_KEY}}
-    # audience: "https://cognitiveservices.azure.com/.default"
-    # organization: <organization_id>
     model: {defs.DEFAULT_EMBEDDING_MODEL}
-    # deployment_name: <azure_model_deployment_name>
-    # encoding_model: {defs.ENCODING_MODEL} # automatically set by tiktoken if left undefined
-    model_supports_json: true # recommended if this is available for your model.
-    concurrent_requests: {language_model_defaults.concurrent_requests} # max number of simultaneous LLM requests allowed
+    # api_base: https://<instance>.openai.azure.com
+    # api_version: 2024-05-01-preview
+    concurrent_requests: {language_model_defaults.concurrent_requests}
     async_mode: {language_model_defaults.async_mode.value} # or asyncio
-    retry_strategy: native
+    retry_strategy: {language_model_defaults.retry_strategy}
     max_retries: {language_model_defaults.max_retries}
-    tokens_per_minute: null # set to null to disable rate limiting or auto for dynamic
-    requests_per_minute: null # set to null to disable rate limiting or auto for dynamic
+    tokens_per_minute: null
+    requests_per_minute: null
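For orientation, the chat-model entry above renders into the generated settings.yaml roughly as follows once the placeholders are substituted. The id default_chat_model, the gpt-4o model name, and the numeric limits are illustrative stand-ins for whatever defs and language_model_defaults resolve to in a given release; only the key layout mirrors the template in this diff.

    models:
      default_chat_model:            # stand-in for defs.DEFAULT_CHAT_MODEL_ID
        type: openai_chat            # stand-in for defs.DEFAULT_CHAT_MODEL_TYPE.value
        model_provider: openai       # stand-in for defs.DEFAULT_MODEL_PROVIDER
        auth_type: api_key           # or azure_managed_identity
        api_key: ${GRAPHRAG_API_KEY} # set this in the generated .env file, or remove if managed identity
        model: gpt-4o                # stand-in for defs.DEFAULT_CHAT_MODEL
        # api_base: https://<instance>.openai.azure.com
        # api_version: 2024-05-01-preview
        model_supports_json: true
        concurrent_requests: 25      # stand-in for language_model_defaults.concurrent_requests
        async_mode: threaded         # or asyncio
        retry_strategy: native       # stand-in for language_model_defaults.retry_strategy
        max_retries: 10              # stand-in for language_model_defaults.max_retries
        tokens_per_minute: null
        requests_per_minute: null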
|
 ### Input settings ###
 
 input:
   storage:
     type: {graphrag_config_defaults.input.storage.type.value} # or blob
     base_dir: "{graphrag_config_defaults.input.storage.base_dir}"
   file_type: {graphrag_config_defaults.input.file_type.value} # [csv, text, json]
-
|
 chunks:
   size: {graphrag_config_defaults.chunks.size}
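As a concrete reference for the two sections above, the rendered input and chunks blocks take roughly this shape; the literal values (file, "input", text, 1200) are plausible defaults shown as stand-ins for the graphrag_config_defaults entries, not values confirmed by this diff.

    input:
      storage:
        type: file         # or blob; stand-in for graphrag_config_defaults.input.storage.type.value
        base_dir: "input"  # stand-in for graphrag_config_defaults.input.storage.base_dir
      file_type: text      # [csv, text, json]

    chunks:
      size: 1200           # stand-in for graphrag_config_defaults.chunks.size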
|
     type: {vector_store_defaults.type}
     db_uri: {vector_store_defaults.db_uri}
     container_name: {vector_store_defaults.container_name}
-    overwrite: {vector_store_defaults.overwrite}
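With overwrite dropped from the template, a rendered vector_store block ends up looking roughly like this; the default_vector_store id and the lancedb, output/lancedb, and default values are illustrative stand-ins for the vector_store_defaults entries, and the enclosing keys sit in the collapsed portion of this diff.

    vector_store:
      default_vector_store:      # illustrative id; the enclosing keys are not shown in this hunk
        type: lancedb            # stand-in for vector_store_defaults.type
        db_uri: output/lancedb   # stand-in for vector_store_defaults.db_uri
        container_name: default  # stand-in for vector_store_defaults.container_name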
|
 ### Workflow settings ###
|
|