Skip to content

Commit 3097106

Browse files
committed
feat: Add configurable API timeout setting
- Add CONF_API_TIMEOUT configuration option (5-600 seconds, default 30)
- Update config_flow.py with api_timeout field in provider form and options flow
- Update api_client.py to use configurable timeout instead of hardcoded value
- Update coordinator.py to use api_timeout for async_process_message
- Update __init__.py to read and pass api_timeout from config
- Merge entry.data with entry.options for proper options flow support
- Add translations for api_timeout in all 8 language files (en, ru, de, es, it, hi, sr, zh)
- Bump version to 2.2.0

Closes #8
1 parent 3507396 commit 3097106

File tree

14 files changed

+90
-20
lines changed

14 files changed

+90
-20
lines changed

custom_components/ha_text_ai/__init__.py

Lines changed: 19 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
CONF_MAX_TOKENS,
3636
CONF_API_ENDPOINT,
3737
CONF_REQUEST_INTERVAL,
38+
CONF_API_TIMEOUT,
3839
CONF_API_PROVIDER,
3940
CONF_CONTEXT_MESSAGES,
4041
API_PROVIDER_OPENAI,
@@ -51,8 +52,8 @@
5152
DEFAULT_DEEPSEEK_ENDPOINT,
5253
DEFAULT_GEMINI_ENDPOINT,
5354
DEFAULT_REQUEST_INTERVAL,
55+
DEFAULT_API_TIMEOUT,
5456
DEFAULT_CONTEXT_MESSAGES,
55-
API_TIMEOUT,
5657
SERVICE_ASK_QUESTION,
5758
SERVICE_CLEAR_HISTORY,
5859
SERVICE_GET_HISTORY,
@@ -268,7 +269,7 @@ def copy_file():
268269

269270
return True
270271

271-
async def async_check_api(session, endpoint: str, headers: dict, provider: str) -> bool:
272+
async def async_check_api(session, endpoint: str, headers: dict, provider: str, api_timeout: int = DEFAULT_API_TIMEOUT) -> bool:
272273
"""Check API availability for different providers."""
273274
try:
274275
if provider == API_PROVIDER_GEMINI:
@@ -285,7 +286,7 @@ async def async_check_api(session, endpoint: str, headers: dict, provider: str)
285286
else: # OpenAI
286287
check_url = f"{endpoint}/models"
287288

288-
async with timeout(API_TIMEOUT):
289+
async with timeout(api_timeout):
289290
async with session.get(check_url, headers=headers) as response:
290291
if response.status in [200, 404]:
291292
return True
@@ -311,22 +312,24 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
311312
_LOGGER.error("API provider not specified")
312313
raise ConfigEntryNotReady("API provider is required")
313314

314-
# Get configuration
315+
# Get configuration (merge data with options to apply any runtime changes)
316+
config = {**entry.data, **entry.options}
315317
session = aiohttp_client.async_get_clientsession(hass)
316-
api_provider = entry.data.get(CONF_API_PROVIDER)
317-
model = entry.data.get(CONF_MODEL, DEFAULT_MODEL)
318-
endpoint = entry.data.get(
318+
api_provider = config.get(CONF_API_PROVIDER)
319+
model = config.get(CONF_MODEL, DEFAULT_MODEL)
320+
endpoint = config.get(
319321
CONF_API_ENDPOINT,
320322
DEFAULT_OPENAI_ENDPOINT if api_provider == API_PROVIDER_OPENAI
321323
else DEFAULT_ANTHROPIC_ENDPOINT
322324
).rstrip('/')
323-
api_key = entry.data[CONF_API_KEY]
325+
api_key = entry.data[CONF_API_KEY] # API key stays in data, not in options
324326
instance_name = entry.data.get(CONF_NAME, entry.entry_id)
325-
request_interval = entry.data.get(CONF_REQUEST_INTERVAL, DEFAULT_REQUEST_INTERVAL)
326-
max_tokens = entry.data.get(CONF_MAX_TOKENS, DEFAULT_MAX_TOKENS)
327-
temperature = entry.data.get(CONF_TEMPERATURE, DEFAULT_TEMPERATURE)
328-
max_history_size = entry.data.get(CONF_MAX_HISTORY_SIZE, DEFAULT_MAX_HISTORY)
329-
context_messages = entry.data.get(CONF_CONTEXT_MESSAGES, DEFAULT_CONTEXT_MESSAGES)
327+
request_interval = config.get(CONF_REQUEST_INTERVAL, DEFAULT_REQUEST_INTERVAL)
328+
api_timeout = config.get(CONF_API_TIMEOUT, DEFAULT_API_TIMEOUT)
329+
max_tokens = config.get(CONF_MAX_TOKENS, DEFAULT_MAX_TOKENS)
330+
temperature = config.get(CONF_TEMPERATURE, DEFAULT_TEMPERATURE)
331+
max_history_size = config.get(CONF_MAX_HISTORY_SIZE, DEFAULT_MAX_HISTORY)
332+
context_messages = config.get(CONF_CONTEXT_MESSAGES, DEFAULT_CONTEXT_MESSAGES)
330333
is_anthropic = api_provider == API_PROVIDER_ANTHROPIC
331334

332335
headers = {
@@ -340,7 +343,7 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
340343
else:
341344
headers["Authorization"] = f"Bearer {api_key}"
342345

343-
if not await async_check_api(session, endpoint, headers, api_provider):
346+
if not await async_check_api(session, endpoint, headers, api_provider, api_timeout):
344347
raise ConfigEntryNotReady("API connection failed")
345348

346349
_LOGGER.debug("Creating API client for %s with endpoint %s", api_provider, endpoint)
@@ -351,6 +354,7 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
351354
headers=headers,
352355
api_provider=api_provider,
353356
model=model,
357+
api_timeout=api_timeout,
354358
)
355359

356360
coordinator = HATextAICoordinator(
@@ -364,6 +368,7 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
364368
max_history_size=max_history_size,
365369
context_messages=context_messages,
366370
is_anthropic=is_anthropic,
371+
api_timeout=api_timeout,
367372
)
368373

369374
_LOGGER.debug(f"Created coordinator for {instance_name}")

custom_components/ha_text_ai/api_client.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
from homeassistant.core import HomeAssistant
1717
from homeassistant.exceptions import HomeAssistantError
1818
from .const import (
19-
API_TIMEOUT,
19+
DEFAULT_API_TIMEOUT,
2020
API_RETRY_COUNT,
2121
API_PROVIDER_ANTHROPIC,
2222
API_PROVIDER_DEEPSEEK,
@@ -41,14 +41,16 @@ def __init__(
4141
headers: Dict[str, str],
4242
api_provider: str,
4343
model: str,
44+
api_timeout: int = DEFAULT_API_TIMEOUT,
4445
) -> None:
4546
"""Initialize API client."""
4647
self.session = session
4748
self.endpoint = endpoint
4849
self.headers = headers
4950
self.api_provider = api_provider
5051
self.model = model
51-
self.timeout = ClientTimeout(total=API_TIMEOUT)
52+
self.api_timeout = api_timeout
53+
self.timeout = ClientTimeout(total=api_timeout)
5254
self._closed = False
5355

5456
async def __aenter__(self):
@@ -93,7 +95,7 @@ async def _make_request(
9395

9496
for attempt in range(API_RETRY_COUNT):
9597
try:
96-
async with timeout(API_TIMEOUT):
98+
async with timeout(self.api_timeout):
9799
async with self.session.post(
98100
url,
99101
json=payload,

custom_components/ha_text_ai/config_flow.py

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
CONF_MAX_TOKENS,
2626
CONF_API_ENDPOINT,
2727
CONF_REQUEST_INTERVAL,
28+
CONF_API_TIMEOUT,
2829
CONF_API_PROVIDER,
2930
CONF_CONTEXT_MESSAGES,
3031
API_PROVIDER_OPENAI,
@@ -38,6 +39,7 @@
3839
DEFAULT_TEMPERATURE,
3940
DEFAULT_MAX_TOKENS,
4041
DEFAULT_REQUEST_INTERVAL,
42+
DEFAULT_API_TIMEOUT,
4143
DEFAULT_OPENAI_ENDPOINT,
4244
DEFAULT_ANTHROPIC_ENDPOINT,
4345
DEFAULT_DEEPSEEK_ENDPOINT,
@@ -48,6 +50,8 @@
4850
MIN_MAX_TOKENS,
4951
MAX_MAX_TOKENS,
5052
MIN_REQUEST_INTERVAL,
53+
MIN_API_TIMEOUT,
54+
MAX_API_TIMEOUT,
5155
DEFAULT_NAME_PREFIX,
5256
DEFAULT_MAX_HISTORY,
5357
CONF_MAX_HISTORY_SIZE,
@@ -131,6 +135,10 @@ async def async_step_provider(self, user_input: Optional[Dict[str, Any]] = None)
131135
vol.Coerce(float),
132136
vol.Range(min=MIN_REQUEST_INTERVAL)
133137
),
138+
vol.Optional(CONF_API_TIMEOUT, default=DEFAULT_API_TIMEOUT): vol.All(
139+
vol.Coerce(int),
140+
vol.Range(min=MIN_API_TIMEOUT, max=MAX_API_TIMEOUT)
141+
),
134142
vol.Optional(
135143
CONF_CONTEXT_MESSAGES,
136144
default=DEFAULT_CONTEXT_MESSAGES
@@ -182,6 +190,10 @@ async def async_step_provider(self, user_input: Optional[Dict[str, Any]] = None)
182190
vol.Coerce(float),
183191
vol.Range(min=MIN_REQUEST_INTERVAL)
184192
),
193+
vol.Optional(CONF_API_TIMEOUT, default=input_copy.get(CONF_API_TIMEOUT, DEFAULT_API_TIMEOUT)): vol.All(
194+
vol.Coerce(int),
195+
vol.Range(min=MIN_API_TIMEOUT, max=MAX_API_TIMEOUT)
196+
),
185197
vol.Optional(
186198
CONF_CONTEXT_MESSAGES,
187199
default=input_copy.get(CONF_CONTEXT_MESSAGES, DEFAULT_CONTEXT_MESSAGES)
@@ -224,6 +236,10 @@ async def async_step_provider(self, user_input: Optional[Dict[str, Any]] = None)
224236
vol.Coerce(float),
225237
vol.Range(min=MIN_REQUEST_INTERVAL)
226238
),
239+
vol.Optional(CONF_API_TIMEOUT, default=input_copy.get(CONF_API_TIMEOUT, DEFAULT_API_TIMEOUT)): vol.All(
240+
vol.Coerce(int),
241+
vol.Range(min=MIN_API_TIMEOUT, max=MAX_API_TIMEOUT)
242+
),
227243
vol.Optional(
228244
CONF_CONTEXT_MESSAGES,
229245
default=input_copy.get(CONF_CONTEXT_MESSAGES, DEFAULT_CONTEXT_MESSAGES)
@@ -282,6 +298,10 @@ async def async_step_provider(self, user_input: Optional[Dict[str, Any]] = None)
282298
vol.Coerce(float),
283299
vol.Range(min=MIN_REQUEST_INTERVAL)
284300
),
301+
vol.Optional(CONF_API_TIMEOUT, default=input_copy.get(CONF_API_TIMEOUT, DEFAULT_API_TIMEOUT)): vol.All(
302+
vol.Coerce(int),
303+
vol.Range(min=MIN_API_TIMEOUT, max=MAX_API_TIMEOUT)
304+
),
285305
vol.Optional(
286306
CONF_CONTEXT_MESSAGES,
287307
default=input_copy.get(CONF_CONTEXT_MESSAGES, DEFAULT_CONTEXT_MESSAGES)
@@ -433,6 +453,7 @@ async def _create_entry(self, user_input: Dict[str, Any]) -> FlowResult:
433453
CONF_TEMPERATURE: user_input.get(CONF_TEMPERATURE, DEFAULT_TEMPERATURE),
434454
CONF_MAX_TOKENS: user_input.get(CONF_MAX_TOKENS, DEFAULT_MAX_TOKENS),
435455
CONF_REQUEST_INTERVAL: user_input.get(CONF_REQUEST_INTERVAL, DEFAULT_REQUEST_INTERVAL),
456+
CONF_API_TIMEOUT: user_input.get(CONF_API_TIMEOUT, DEFAULT_API_TIMEOUT),
436457
CONF_CONTEXT_MESSAGES: user_input.get(CONF_CONTEXT_MESSAGES, DEFAULT_CONTEXT_MESSAGES),
437458
CONF_MAX_HISTORY_SIZE: user_input.get(CONF_MAX_HISTORY_SIZE, DEFAULT_MAX_HISTORY),
438459
}
@@ -504,6 +525,13 @@ async def async_step_init(self, user_input: Optional[Dict[str, Any]] = None) ->
504525
vol.Coerce(float),
505526
vol.Range(min=MIN_REQUEST_INTERVAL)
506527
),
528+
vol.Optional(
529+
CONF_API_TIMEOUT,
530+
default=current_data.get(CONF_API_TIMEOUT, DEFAULT_API_TIMEOUT)
531+
): vol.All(
532+
vol.Coerce(int),
533+
vol.Range(min=MIN_API_TIMEOUT, max=MAX_API_TIMEOUT)
534+
),
507535
vol.Optional(
508536
CONF_CONTEXT_MESSAGES,
509537
default=current_data.get(

custom_components/ha_text_ai/const.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@
6161
CONF_MAX_TOKENS: Final = "max_tokens"
6262
CONF_API_ENDPOINT: Final = "api_endpoint"
6363
CONF_REQUEST_INTERVAL: Final = "request_interval"
64+
CONF_API_TIMEOUT: Final = "api_timeout"
6465
CONF_INSTANCE: Final = "instance"
6566
CONF_MAX_HISTORY_SIZE: Final = "max_history_size" # Correct constant name
6667
CONF_IS_ANTHROPIC: Final = "is_anthropic"
@@ -79,6 +80,7 @@
7980
DEFAULT_MAX_TOKENS: Final = 1000
8081
DEFAULT_REQUEST_INTERVAL: Final = 1.0
8182
DEFAULT_TIMEOUT: Final = 30
83+
DEFAULT_API_TIMEOUT: Final = 30
8284
DEFAULT_MAX_HISTORY: Final = 50
8385
DEFAULT_NAME: Final = "HA Text AI"
8486
DEFAULT_NAME_PREFIX = "ha_text_ai"
@@ -93,9 +95,11 @@
9395
MAX_MAX_TOKENS: Final = 100000
9496
MIN_REQUEST_INTERVAL: Final = 0.1
9597
MAX_REQUEST_INTERVAL: Final = 60.0
98+
MIN_API_TIMEOUT: Final = 5
99+
MAX_API_TIMEOUT: Final = 600
96100

97101
# API constants
98-
API_TIMEOUT: Final = 30
102+
API_TIMEOUT: Final = 30 # Legacy constant, use CONF_API_TIMEOUT from config
99103
API_RETRY_COUNT: Final = 3
100104

101105
# Service names
@@ -232,6 +236,10 @@
232236
vol.Coerce(float),
233237
vol.Range(min=MIN_REQUEST_INTERVAL, max=MAX_REQUEST_INTERVAL)
234238
),
239+
vol.Optional(CONF_API_TIMEOUT, default=DEFAULT_API_TIMEOUT): vol.All(
240+
vol.Coerce(int),
241+
vol.Range(min=MIN_API_TIMEOUT, max=MAX_API_TIMEOUT)
242+
),
235243
vol.Optional(CONF_MAX_HISTORY_SIZE, default=DEFAULT_MAX_HISTORY): vol.All( # Correct usage
236244
vol.Coerce(int),
237245
vol.Range(min=1, max=100),

custom_components/ha_text_ai/coordinator.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@
3737
DEFAULT_MAX_TOKENS,
3838
DEFAULT_TEMPERATURE,
3939
DEFAULT_MAX_HISTORY,
40+
DEFAULT_API_TIMEOUT,
4041
DEFAULT_CONTEXT_MESSAGES,
4142
ABSOLUTE_MAX_HISTORY_SIZE,
4243
MAX_ATTRIBUTE_SIZE,
@@ -76,6 +77,7 @@ def __init__(
7677
max_history_size: int = DEFAULT_MAX_HISTORY,
7778
context_messages: int = DEFAULT_CONTEXT_MESSAGES,
7879
is_anthropic: bool = False,
80+
api_timeout: int = DEFAULT_API_TIMEOUT,
7981
) -> None:
8082
"""Initialize coordinator."""
8183
self.instance_name = instance_name
@@ -115,6 +117,7 @@ def __init__(
115117
ABSOLUTE_MAX_HISTORY_SIZE
116118
)
117119
self.is_anthropic = is_anthropic
120+
self.api_timeout = api_timeout
118121

119122
# Initialize essential attributes
120123
self._is_processing = False
@@ -916,7 +919,7 @@ async def async_process_question(
916919
async def async_process_message(self, question: str, **kwargs) -> dict:
917920
"""Process message using the AI client."""
918921
try:
919-
async with asyncio.timeout(60): # 60 second timeout
922+
async with asyncio.timeout(self.api_timeout):
920923
if self.is_anthropic:
921924
response = await self._process_anthropic_message(question, **kwargs)
922925
else:

custom_components/ha_text_ai/manifest.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,6 @@
2424
"single_config_entry": false,
2525
"ssdp": [],
2626
"usb": [],
27-
"version": "2.1.9",
27+
"version": "2.2.0",
2828
"zeroconf": []
2929
}

custom_components/ha_text_ai/translations/de.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
"temperature": "Kreativität der Antwort (0-2, niedriger = fokussierter)",
2222
"max_tokens": "Maximale Länge der Antwort (1-100000 Token)",
2323
"request_interval": "Minimale Zeit zwischen Anfragen (0,1-60 Sekunden)",
24+
"api_timeout": "API-Anfrage Timeout in Sekunden (5-600)",
2425
"context_messages": "Anzahl der zu behaltenden Kontextnachrichten (1-20)",
2526
"max_history_size": "Maximale Größe des Gesprächsverlaufs (1-100)"
2627
}
@@ -37,6 +38,7 @@
3738
"api_endpoint": "Benutzerdefinierte API-Endpunkt-URL (optional)",
3839
"api_provider": "API-Anbieter",
3940
"request_interval": "Minimale Zeit zwischen Anfragen (0,1-60 Sekunden)",
41+
"api_timeout": "API-Anfrage Timeout in Sekunden (5-600)",
4042
"context_messages": "Anzahl der zu behaltenden Kontextnachrichten (1-20)",
4143
"max_history_size": "Maximale Größe des Gesprächsverlaufs (1-100)"
4244
}
@@ -79,6 +81,7 @@
7981
"temperature": "Kreativität der Antwort (0-2)",
8082
"max_tokens": "Maximale Länge der Antwort (1-100000)",
8183
"request_interval": "Minimale Anfrageintervall (0,1-60 Sekunden)",
84+
"api_timeout": "API-Anfrage Timeout in Sekunden (5-600)",
8285
"context_messages": "Anzahl der vorherigen Nachrichten, die im Kontext enthalten sein sollen (1-20)",
8386
"max_history_size": "Maximale Größe des Gesprächsverlaufs (1-100)"
8487
}

custom_components/ha_text_ai/translations/en.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
"temperature": "Response creativity (0-2, lower = more focused)",
2222
"max_tokens": "Maximum response length (1-100000 tokens)",
2323
"request_interval": "Minimum time between requests (0.1-60 seconds)",
24+
"api_timeout": "API request timeout in seconds (5-600)",
2425
"context_messages": "Number of context messages to retain (1-20)",
2526
"max_history_size": "Maximum conversation history size (1-100)"
2627
}
@@ -37,6 +38,7 @@
3738
"api_endpoint": "Custom API endpoint URL (optional)",
3839
"api_provider": "API Provider",
3940
"request_interval": "Minimum time between requests (0.1-60 seconds)",
41+
"api_timeout": "API request timeout in seconds (5-600)",
4042
"context_messages": "Number of context messages to retain (1-20)",
4143
"max_history_size": "Maximum conversation history size (1-100)"
4244
}
@@ -79,6 +81,7 @@
7981
"temperature": "Response creativity (0-2)",
8082
"max_tokens": "Maximum response length (1-100000)",
8183
"request_interval": "Minimum request interval (0.1-60 seconds)",
84+
"api_timeout": "API request timeout in seconds (5-600)",
8285
"context_messages": "Number of previous messages to include in context (1-20)",
8386
"max_history_size": "Maximum conversation history size (1-100)"
8487
}

custom_components/ha_text_ai/translations/es.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
"temperature": "Creatividad de la respuesta (0-2, menor = más enfocado)",
2222
"max_tokens": "Longitud máxima de la respuesta (1-100000 tokens)",
2323
"request_interval": "Tiempo mínimo entre solicitudes (0.1-60 segundos)",
24+
"api_timeout": "Tiempo de espera de solicitud API en segundos (5-600)",
2425
"context_messages": "Número de mensajes de contexto a retener (1-20)",
2526
"max_history_size": "Tamaño máximo del historial de conversación (1-100)"
2627
}
@@ -37,6 +38,7 @@
3738
"api_endpoint": "URL del endpoint de API personalizado (opcional)",
3839
"api_provider": "Proveedor de API",
3940
"request_interval": "Tiempo mínimo entre solicitudes (0.1-60 segundos)",
41+
"api_timeout": "Tiempo de espera de solicitud API en segundos (5-600)",
4042
"context_messages": "Número de mensajes de contexto a retener (1-20)",
4143
"max_history_size": "Tamaño máximo del historial de conversación (1-100)"
4244
}
@@ -79,6 +81,7 @@
7981
"temperature": "Creatividad de la respuesta (0-2)",
8082
"max_tokens": "Longitud máxima de la respuesta (1-100000)",
8183
"request_interval": "Intervalo mínimo de solicitud (0.1-60 segundos)",
84+
"api_timeout": "Tiempo de espera de solicitud API en segundos (5-600)",
8285
"context_messages": "Número de mensajes anteriores a incluir en el contexto (1-20)",
8386
"max_history_size": "Tamaño máximo del historial de conversación (1-100)"
8487
}

custom_components/ha_text_ai/translations/hi.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
"temperature": "प्रतिक्रिया की रचनात्मकता (0-2, कम = अधिक केंद्रित)",
1313
"max_tokens": "प्रतिक्रिया की अधिकतम लंबाई (1-100000 टोकन)",
1414
"request_interval": "अनुरोधों के बीच न्यूनतम समय (0.1-60 सेकंड)",
15+
"api_timeout": "एपीआई अनुरोध टाइमआउट सेकंड में (5-600)",
1516
"context_messages": "रखने के लिए संदर्भ संदेशों की संख्या (1-20)",
1617
"max_history_size": "अधिकतम बातचीत इतिहास आकार (1-100)"
1718
}
@@ -28,6 +29,7 @@
2829
"api_endpoint": "कस्टम एपीआई एंडपॉइंट यूआरएल (वैकल्पिक)",
2930
"api_provider": "एपीआई प्रदाता",
3031
"request_interval": "अनुरोधों के बीच न्यूनतम समय (0.1-60 सेकंड)",
32+
"api_timeout": "एपीआई अनुरोध टाइमआउट सेकंड में (5-600)",
3133
"context_messages": "रखने के लिए संदर्भ संदेशों की संख्या (1-20)",
3234
"max_history_size": "अधिकतम बातचीत इतिहास आकार (1-100)"
3335
}
@@ -70,6 +72,7 @@
7072
"temperature": "प्रतिक्रिया की रचनात्मकता (0-2)",
7173
"max_tokens": "प्रतिक्रिया की अधिकतम लंबाई (1-100000)",
7274
"request_interval": "न्यूनतम अनुरोध अंतराल (0.1-60 सेकंड)",
75+
"api_timeout": "एपीआई अनुरोध टाइमआउट सेकंड में (5-600)",
7376
"context_messages": "संदर्भ में शामिल करने के लिए पिछले संदेशों की संख्या (1-20)",
7477
"max_history_size": "अधिकतम बातचीत इतिहास आकार (1-100)"
7578
}

0 commit comments

Comments
 (0)