Skip to content

Commit 7ee8b71

Browse files
committed
feat: support openai-gpt-5.1 and update genkit
1 parent 650fc97 commit 7ee8b71

File tree

3 files changed

+67
-48
lines changed

3 files changed

+67
-48
lines changed

package.json

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -53,10 +53,10 @@
5353
"dependencies": {
5454
"@anthropic-ai/sdk": "^0.68.0",
5555
"@axe-core/puppeteer": "^4.10.2",
56-
"@genkit-ai/compat-oai": "^1.19.1",
56+
"@genkit-ai/compat-oai": "1.23.0",
5757
"@genkit-ai/googleai": "1.22.0",
5858
"@genkit-ai/mcp": "1.22.0",
59-
"@google/genai": "1.29.0",
59+
"@google/genai": "1.29.1",
6060
"@inquirer/prompts": "^7.8.4",
6161
"@safety-web/runner": "0.4.0-alpha.14",
6262
"@safety-web/types": "0.4.0-alpha.14",
@@ -68,7 +68,7 @@
6868
"chalk": "^5.4.1",
6969
"cli-progress": "^3.12.0",
7070
"file-type": "^21.0.0",
71-
"genkit": "^1.19.1",
71+
"genkit": "^1.23.0",
7272
"genkitx-anthropic": "0.25.0",
7373
"handlebars": "^4.7.8",
7474
"lighthouse": "^13.0.0",
@@ -81,7 +81,7 @@
8181
"strict-csp": "^1.1.1",
8282
"stylelint": "^16.21.1",
8383
"stylelint-config-recommended-scss": "^16.0.0",
84-
"tiktoken": "^1.0.22",
84+
"tiktoken": "1.0.22",
8585
"tinyglobby": "^0.2.14",
8686
"tree-kill": "^1.2.2",
8787
"tsx": "^4.20.3",

pnpm-lock.yaml

Lines changed: 48 additions & 43 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

runner/codegen/genkit/providers/open-ai.ts

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ export class OpenAiModelProvider extends GenkitModelProvider {
1111
'openai-o3': () => openAI.model('o3'),
1212
'openai-o4-mini': () => openAI.model('o4-mini'),
1313
'openai-gpt-5': () => openAI.model('gpt-5'),
14+
'openai-gpt-5.1': () => openAI.model('gpt-5.1'),
1415
};
1516

1617
private countTokensForModel(
@@ -60,9 +61,22 @@ export class OpenAiModelProvider extends GenkitModelProvider {
6061
interval: 1000 * 60 * 1.5, // Refresh tokens after 1.5 minutes to be on the safe side.
6162
}),
6263
tokensPerMinute: new RateLimiter({
63-
tokensPerInterval: 30_000 * 0.75, // *0.75 to be more resilient to token count deviations
64+
tokensPerInterval: 500_000 * 0.75, // *0.75 to be more resilient to token count deviations
65+
interval: 1000 * 60 * 1.5, // Refresh tokens after 1.5 minutes to be on the safe side.
66+
}),
67+
countTokens: async prompt => this.countTokensForModel('gpt-5', prompt),
68+
},
69+
// See: https://platform.openai.com/docs/models/gpt-5.1
70+
'openai/gpt-5.1': {
71+
requestPerMinute: new RateLimiter({
72+
tokensPerInterval: 500,
73+
interval: 1000 * 60 * 1.5, // Refresh tokens after 1.5 minutes to be on the safe side.
74+
}),
75+
tokensPerMinute: new RateLimiter({
76+
tokensPerInterval: 500_000 * 0.75, // *0.75 to be more resilient to token count deviations
6477
interval: 1000 * 60 * 1.5, // Refresh tokens after 1.5 minutes to be on the safe side.
6578
}),
79+
// TODO: Consider selecting a GPT-5.1 tokenizer if available; GPT-5's token counting should work for it too.
6680
countTokens: async prompt => this.countTokensForModel('gpt-5', prompt),
6781
},
6882
};

0 commit comments

Comments
 (0)