Skip to content

Commit 6bcd873

Browse files
committed
🚧 WIP
1 parent 0267341 commit 6bcd873

File tree

8 files changed

+251
-18
lines changed

8 files changed

+251
-18
lines changed

examples/ai/prompt.ts

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
import {prompt} from '@github/models'
2+
import 'dotenv/config'
3+
4+
async function main() {
5+
const result = await prompt('./teacher.prompt.yml', {
6+
subject: 'balloon popping',
7+
})
8+
9+
console.log('Result:')
10+
console.log(result.object || result.text)
11+
console.log()
12+
}
13+
14+
main().catch(console.error)

examples/ai/teacher.prompt.yml

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# Example prompt definition consumed by examples/ai/prompt.ts via prompt().
# The {{subject}} placeholder is substituted at runtime.
name: teacher
model: openai/gpt-4.1
modelParameters:
  temperature: 0.7
  maxTokens: 300
responseFormat: json_object
messages:
  - role: system
    content: You're an elementary school teacher who loves to make learning fun.
  - role: user
    content: Please explain {{subject}} in as little as 5 sentences and give it to me as json.

package-lock.json

Lines changed: 82 additions & 15 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -32,16 +32,23 @@
3232
"dependencies": {
3333
"@ai-sdk/openai-compatible": "beta",
3434
"@ai-sdk/provider": "beta",
35-
"@ai-sdk/provider-utils": "beta"
35+
"@ai-sdk/provider-utils": "beta",
36+
"confbox": "^0.2.2",
37+
"templite": "^1.2.0",
38+
"zod": "^3 || ^4"
39+
},
40+
"peerDependencies": {
41+
"ai": "beta"
3642
},
3743
"devDependencies": {
38-
"eslint-plugin-simple-import-sort": "12.1.1",
39-
"tsup": "^8.3.0",
44+
"ai": "beta",
4045
"@github/prettier-config": "0.0.6",
4146
"@types/node": "^24",
4247
"eslint": "^9",
4348
"eslint-plugin-github": "^6",
49+
"eslint-plugin-simple-import-sort": "12.1.1",
4450
"prettier": "^3",
51+
"tsup": "^8.3.0",
4552
"typescript": "^5.7.2",
4653
"vitest": "^3"
4754
},

readme.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,10 @@ You can use the following optional settings to customize the GitHub Models provi
7474
You can use it as a middleware to intercept requests,
7575
or to provide a custom fetch implementation for e.g. testing.
7676

77+
## Prompt files (`.prompt.yml`)
78+
79+
TODO
80+
7781
## License
7882

7983
Distributed under the MIT license. See [LICENSE](./license.txt) for details.

src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
export type {ModelId} from './model-id'
2+
export {getPrompt, prompt} from './prompt'
23
export {
34
createGitHubModels,
45
githubModels,

src/options.ts

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
// See: https://docs.github.com/en/rest/models/catalog?apiVersion=2022-11-28#list-all-models
2+
export type GitHubModelsChatModelId =
3+
| 'openai/gpt-4.1'
4+
| 'openai/gpt-4.1-mini'
5+
| 'openai/gpt-4.1-nano'
6+
| 'openai/gpt-4o'
7+
| 'openai/gpt-4o-mini'
8+
| 'openai/o1'
9+
| 'openai/o1-mini'
10+
| 'openai/o1-preview'
11+
| 'openai/o3'
12+
| 'openai/o3-mini'
13+
| 'openai/o4-mini'
14+
| 'openai/text-embedding-3-large'
15+
| 'openai/text-embedding-3-small'
16+
| 'ai21-labs/ai21-jamba-1.5-large'
17+
| 'ai21-labs/ai21-jamba-1.5-mini'
18+
| 'cohere/cohere-command-a'
19+
| 'cohere/cohere-command-r-08-2024'
20+
| 'cohere/cohere-command-r-plus-08-2024'
21+
| 'cohere/cohere-embed-v3-english'
22+
| 'cohere/cohere-embed-v3-multilingual'
23+
| 'core42/jais-30b-chat'
24+
| 'deepseek/deepseek-r1'
25+
| 'deepseek/deepseek-r1-0528'
26+
| 'deepseek/deepseek-v3-0324'
27+
| 'meta/llama-3.2-11b-vision-instruct'
28+
| 'meta/llama-3.2-90b-vision-instruct'
29+
| 'meta/llama-3.3-70b-instruct'
30+
| 'meta/llama-4-maverick-17b-128e-instruct-fp8'
31+
| 'meta/llama-4-scout-17b-16e-instruct'
32+
| 'meta/meta-llama-3.1-405b-instruct'
33+
| 'meta/meta-llama-3.1-8b-instruct'
34+
| 'mistral-ai/codestral-2501'
35+
| 'mistral-ai/ministral-3b'
36+
| 'mistral-ai/mistral-large-2411'
37+
| 'mistral-ai/mistral-medium-2505'
38+
| 'mistral-ai/mistral-nemo'
39+
| 'mistral-ai/mistral-small-2503'
40+
| 'xai/grok-3'
41+
| 'xai/grok-3-mini'
42+
| 'microsoft/mai-ds-r1'
43+
| 'microsoft/phi-3.5-mini-instruct'
44+
| 'microsoft/phi-3.5-moe-instruct'
45+
| 'microsoft/phi-3.5-vision-instruct'
46+
| 'microsoft/phi-3-medium-128k-instruct'
47+
| 'microsoft/phi-3-medium-4k-instruct'
48+
| 'microsoft/phi-3-mini-128k-instruct'
49+
| 'microsoft/phi-3-mini-4k-instruct'
50+
| 'microsoft/phi-3-small-128k-instruct'
51+
| 'microsoft/phi-3-small-8k-instruct'
52+
| 'microsoft/phi-4'
53+
| 'microsoft/phi-4-mini-instruct'
54+
| 'microsoft/phi-4-mini-reasoning'
55+
| 'microsoft/phi-4-multimodal-instruct'
56+
| 'microsoft/phi-4-reasoning'
57+
| (string & {})

src/prompt.ts

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
import {readFile} from 'node:fs/promises'
2+
3+
import {generateObject, generateText, jsonSchema} from 'ai'
4+
// eslint-disable-next-line import/no-unresolved
5+
import {parseYAML} from 'confbox/yaml'
6+
import templite from 'templite'
7+
import {z} from 'zod/v4'
8+
9+
import {createGitHubModels, type GitHubModelsProviderSettings} from './provider'
10+
11+
const message = z.discriminatedUnion('role', [
12+
z.object({role: z.literal('user'), content: z.string()}),
13+
z.object({role: z.literal('system'), content: z.string()}),
14+
])
15+
16+
const schema = z.object({
17+
name: z.string().optional(),
18+
description: z.string().optional(),
19+
model: z.string(),
20+
modelParameters: z
21+
.object({
22+
maxTokens: z.number().positive().optional(),
23+
temperature: z.number().min(0).max(1).optional(),
24+
topP: z.number().min(0).max(1).optional(),
25+
})
26+
.optional(),
27+
messages: z.array(message),
28+
responseFormat: z.enum(['text', 'json_object', 'json_schema']).optional(),
29+
jsonSchema: z.object().optional(),
30+
})
31+
32+
type PromptConfig = z.infer<typeof schema>
33+
type Variables = Record<string, string | number | boolean>
34+
35+
export async function getPrompt(filename: string | URL, variables: Variables = {}): Promise<PromptConfig> {
36+
const file = await readFile(filename, 'utf8')
37+
const yml = parseYAML(file, {
38+
filename: filename.toString(),
39+
})
40+
const p = schema.parse(yml)
41+
for (const msg of p.messages) msg.content = templite(msg.content, variables)
42+
return p
43+
}
44+
45+
export async function prompt(
46+
filename: string | URL,
47+
variables: Variables = {},
48+
options: GitHubModelsProviderSettings = {},
49+
) {
50+
const p = await getPrompt(filename, variables)
51+
const provider = createGitHubModels(options)
52+
53+
const model = provider(p.model)
54+
55+
if (p.responseFormat === 'json_object' || p.responseFormat === 'json_schema')
56+
return generateObject({
57+
model,
58+
schema: p.jsonSchema ? jsonSchema(p.jsonSchema) : z.any(),
59+
messages: p.messages,
60+
temperature: p.modelParameters?.temperature,
61+
maxOutputTokens: p.modelParameters?.maxTokens,
62+
topP: p.modelParameters?.topP,
63+
})
64+
65+
return generateText({
66+
model,
67+
messages: p.messages,
68+
temperature: p.modelParameters?.temperature,
69+
maxOutputTokens: p.modelParameters?.maxTokens,
70+
topP: p.modelParameters?.topP,
71+
})
72+
}

0 commit comments

Comments
 (0)