Skip to content

Commit 3564b6d

Browse files
author
valentinfrlch
committed
Fixes #437: Remove max_completion_tokens and temperature from the OpenAI validation payload, and increase the max_tokens limit in title_request to 4096
1 parent 498ab44 commit 3564b6d

File tree

1 file changed

+2
-4
lines changed

1 file changed

+2
-4
lines changed

custom_components/llmvision/providers.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -442,7 +442,7 @@ async def vision_request(self, call: dict) -> str:
442442
return await self._make_request(data)
443443

444444
async def title_request(self, call: dict) -> str:
445-
# call.max_tokens = 1000
445+
call.max_tokens = 4096
446446
data = self._prepare_text_data(call)
447447
return await self._make_request(data)
448448

@@ -587,9 +587,7 @@ async def validate(self) -> None | ServiceValidationError:
587587
"model": self.model,
588588
"messages": [
589589
{"role": "user", "content": [{"type": "text", "text": "Hi"}]}
590-
],
591-
"max_completion_tokens": 1,
592-
"temperature": 0.5,
590+
]
593591
}
594592
await self._post(
595593
url=self.endpoint.get("base_url"), headers=headers, data=data

0 commit comments

Comments (0)