Skip to content

Commit 9fce177

Browse files
Fix/qwen3 vl plus highres (QwenLM#721)
* feat: Add Qwen3-VL-Plus token limits (256K input, 32K output) - Added 256K input context window limit for Qwen3-VL-Plus model - Updated output token limit from 8K to 32K for Qwen3-VL-Plus - Added comprehensive tests for both input and output limits As requested by Qwen maintainers for proper model support. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <[email protected]> * fix: enable high-res flag for qwen VL models --------- Co-authored-by: Claude <[email protected]>
1 parent f784133 commit 9fce177

File tree

2 files changed

+77
-1
lines changed

2 files changed

+77
-1
lines changed

packages/core/src/core/openaiContentGenerator/provider/dashscope.test.ts

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -688,6 +688,60 @@ describe('DashScopeOpenAICompatibleProvider', () => {
688688
).toBe(true); // Vision-specific parameter should be preserved
689689
});
690690

691+
it('should set high resolution flag for qwen3-vl-plus', () => {
692+
const request: OpenAI.Chat.ChatCompletionCreateParams = {
693+
model: 'qwen3-vl-plus',
694+
messages: [
695+
{
696+
role: 'user',
697+
content: [
698+
{ type: 'text', text: 'Please inspect the image.' },
699+
{
700+
type: 'image_url',
701+
image_url: { url: 'https://example.com/vl.jpg' },
702+
},
703+
],
704+
},
705+
],
706+
max_tokens: 50000,
707+
};
708+
709+
const result = provider.buildRequest(request, 'test-prompt-id');
710+
711+
expect(result.max_tokens).toBe(32768);
712+
expect(
713+
(result as { vl_high_resolution_images?: boolean })
714+
.vl_high_resolution_images,
715+
).toBe(true);
716+
});
717+
718+
it('should set high resolution flag for the vision-model alias', () => {
719+
const request: OpenAI.Chat.ChatCompletionCreateParams = {
720+
model: 'vision-model',
721+
messages: [
722+
{
723+
role: 'user',
724+
content: [
725+
{ type: 'text', text: 'Alias payload' },
726+
{
727+
type: 'image_url',
728+
image_url: { url: 'https://example.com/alias.png' },
729+
},
730+
],
731+
},
732+
],
733+
max_tokens: 9000,
734+
};
735+
736+
const result = provider.buildRequest(request, 'test-prompt-id');
737+
738+
expect(result.max_tokens).toBe(8192);
739+
expect(
740+
(result as { vl_high_resolution_images?: boolean })
741+
.vl_high_resolution_images,
742+
).toBe(true);
743+
});
744+
691745
it('should handle streaming requests with output token limits', () => {
692746
const request: OpenAI.Chat.ChatCompletionCreateParams = {
693747
model: 'qwen3-coder-plus',

packages/core/src/core/openaiContentGenerator/provider/dashscope.ts

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ export class DashScopeOpenAICompatibleProvider
100100
request.model,
101101
);
102102

103-
if (request.model.startsWith('qwen-vl')) {
103+
if (this.isVisionModel(request.model)) {
104104
return {
105105
...requestWithTokenLimits,
106106
messages,
@@ -267,6 +267,28 @@ export class DashScopeOpenAICompatibleProvider
267267
return contentArray;
268268
}
269269

270+
/**
 * Whether the given model id should receive vision-specific request
 * handling (e.g. the `vl_high_resolution_images` flag).
 *
 * Matches, case-insensitively: the literal `vision-model` alias, any
 * `qwen-vl*` model, and any `qwen3-vl-plus*` model.
 *
 * @param model - Model identifier from the request; `undefined`/empty is
 *   treated as "not a vision model".
 */
private isVisionModel(model: string | undefined): boolean {
  if (!model) {
    return false;
  }

  const id = model.toLowerCase();
  return (
    id === 'vision-model' ||
    id.startsWith('qwen-vl') ||
    id.startsWith('qwen3-vl-plus')
  );
}
291+
270292
/**
271293
* Apply output token limit to a request's max_tokens parameter.
272294
*

0 commit comments

Comments
 (0)