
Commit 654d430

feat: Adds in prompt.yml support

1 parent 947d882

8 files changed: +208 −25 lines

examples/ai/generate-text.ts

Lines changed: 9 additions & 13 deletions
@@ -2,18 +2,14 @@ import {githubModels} from '@github/models'
 import {generateText} from 'ai'
 import 'dotenv/config'
 
-async function main() {
-  const result = await generateText({
-    model: githubModels('openai/gpt-4o'),
-    prompt: 'I want 100 words on how to inflate a balloon.',
-  })
+const result = await generateText({
+  model: githubModels('openai/gpt-4o'),
+  prompt: 'I want 100 words on how to inflate a balloon.',
+})
 
-  console.log('Text:')
-  console.log(result.text)
-  console.log()
+console.log('Text:')
+console.log(result.text)
+console.log()
 
-  console.log('Token usage:', result.usage)
-  console.log('Finish reason:', result.finishReason)
-}
-
-main().catch(console.error)
+console.log('Token usage:', result.usage)
+console.log('Finish reason:', result.finishReason)
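For comparison, a streaming variant of the same example is sketched below. It is not part of this commit; it assumes the streamText helper from the ai package, which takes the same model/prompt options and exposes the output as a textStream async iterable.

// Sketch only: streaming counterpart of examples/ai/generate-text.ts (not in this commit).
import {githubModels} from '@github/models'
import {streamText} from 'ai'
import 'dotenv/config'

// streamText returns immediately and yields tokens as they arrive.
const result = streamText({
  model: githubModels('openai/gpt-4o'),
  prompt: 'I want 100 words on how to inflate a balloon.',
})

for await (const chunk of result.textStream) {
  process.stdout.write(chunk)
}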

examples/ai/prompt.ts

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+import {readFile} from 'node:fs/promises'
+import {parseYAML} from 'confbox/yaml'
+import {createObjectPrompt} from '@github/models/prompt'
+import {generateObject} from 'ai'
+import 'dotenv/config'
+
+const promptFile = parseYAML(await readFile('./teacher.prompt.yml', 'utf8'))
+const prompt = createObjectPrompt(promptFile)
+
+const result = await generateObject(
+  prompt({
+    subject: 'balloon popping',
+  }),
+)
+
+console.log('Object:')
+console.log(result.object)
+console.log()
+
+console.log('Token usage:', result.usage)
+console.log('Finish reason:', result.finishReason)
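The example above reads a .prompt.yml file and hands the parsed object to createObjectPrompt. Because the factories in src/prompt.ts validate an unknown config with zod, a plain inline object works just as well. The sketch below (not part of this commit, with a hypothetical "poet" prompt) shows the text-mode counterpart, using the crateTextPrompt export from this commit together with generateText.

// Sketch only: inline config instead of a .prompt.yml file; "poet" prompt is hypothetical.
import {crateTextPrompt} from '@github/models/prompt'
import {generateText} from 'ai'
import 'dotenv/config'

const prompt = crateTextPrompt({
  name: 'poet',
  model: 'openai/gpt-4o',
  responseFormat: 'text',
  messages: [
    {role: 'system', content: 'You answer in rhyming couplets.'},
    {role: 'user', content: 'Write two lines about {{subject}}.'},
  ],
})

// prompt() fills the {{subject}} placeholder and returns {model, messages, ...settings}.
const result = await generateText(prompt({subject: 'balloons'}))
console.log(result.text)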

examples/ai/teacher.prompt.yml

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
name: teacher
2+
model: openai/gpt-4.1
3+
responseFormat: json_schema
4+
jsonSchema: |-
5+
{
6+
"name": "explanation",
7+
"strict": true,
8+
"schema": {
9+
"type": "object",
10+
"properties": {
11+
"lesson": {
12+
"type": "string",
13+
"description": "The lesson about the subject"
14+
}
15+
},
16+
"additionalProperties": false,
17+
"required": [
18+
"lesson"
19+
]
20+
}
21+
}
22+
messages:
23+
- role: system
24+
content: You're an elementary school teacher who loves to make learning fun.
25+
- role: user
26+
content: Please explain {{subject}} in as little as 5 sentences and give it to be as json.
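For reference, the jsonSchema block above asks the model for an object with a single required lesson string and no extra properties. An illustrative zod equivalent (not used anywhere in this commit) would look like this:

// Illustrative only: zod equivalent of the "explanation" JSON schema above.
import {z} from 'zod/v4'

const explanation = z
  .object({
    lesson: z.string().describe('The lesson about the subject'),
  })
  .strict() // corresponds to "additionalProperties": false

type Explanation = z.infer<typeof explanation> // {lesson: string}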

package-lock.json

Lines changed: 23 additions & 9 deletions
Some generated files are not rendered by default.

package.json

Lines changed: 10 additions & 3 deletions
@@ -14,6 +14,10 @@
       "types": "./dist/index.d.ts",
       "default": "./dist/index.js"
     },
+    "./prompt": {
+      "types": "./dist/prompt.d.ts",
+      "default": "./dist/prompt.js"
+    },
     "./package.json": "./package.json"
   },
   "scripts": {
@@ -39,19 +43,22 @@
   "dependencies": {
     "@ai-sdk/openai-compatible": "beta",
     "@ai-sdk/provider": "beta",
-    "@ai-sdk/provider-utils": "beta"
+    "@ai-sdk/provider-utils": "beta",
+    "templite": "^1.2.0",
+    "tiny-invariant": "^1.3.3",
+    "zod": "^3 || ^4"
   },
   "devDependencies": {
     "@github/prettier-config": "0.0.6",
     "@types/node": "^24",
+    "dotenv": "^17.2.0",
     "eslint": "^9",
     "eslint-plugin-github": "^6",
     "eslint-plugin-simple-import-sort": "12.1.1",
     "prettier": "^3",
     "tsup": "^8.3.0",
     "typescript": "^5.7.2",
-    "vitest": "^3",
-    "dotenv": "^17.2.0"
+    "vitest": "^3"
   },
   "engines": {
     "node": ">=20"

src/prompt.ts

Lines changed: 117 additions & 0 deletions
@@ -0,0 +1,117 @@
+import type {ProviderV2} from '@ai-sdk/provider'
+import {jsonSchema} from '@ai-sdk/provider-utils'
+import templite from 'templite'
+import invariant from 'tiny-invariant'
+import {z} from 'zod/v4'
+
+import {githubModels} from './provider'
+
+type Variables = Record<string, string | number | boolean>
+
+export function crateTextPrompt(config: unknown, provider?: ProviderV2) {
+  const p = schema.parse(config)
+  if (p.responseFormat) invariant(p.responseFormat === 'text', 'responseFormat must be "text" to generate text')
+
+  provider ||= githubModels
+
+  const model = provider.languageModel(p.model)
+
+  return <V extends Variables>(variables: V) => {
+    return {
+      model,
+      messages: messages(p, variables),
+      ...settings(p),
+    } as const
+  }
+}
+
+export function createObjectPrompt(config: unknown, provider?: ProviderV2) {
+  const p = schema.parse(config)
+  invariant(
+    p.responseFormat === 'json_object' || p.responseFormat === 'json_schema',
+    'responseFormat must be either "json_object" or "json_schema" to generate objects',
+  )
+
+  provider ||= githubModels
+  const model = provider.languageModel(p.model)
+
+  let schemaProperties = {}
+  if (p.responseFormat === 'json_schema' && p.jsonSchema) {
+    const jSchema = JSON.parse(p.jsonSchema)
+    schemaProperties = {
+      output: undefined,
+      schema: jsonSchema(jSchema.schema),
+      schemaName: jSchema.name,
+      schemaDescription: jSchema.description,
+    }
+  }
+
+  return <V extends Variables>(variables: V) => {
+    return {
+      model,
+      messages: messages(p, variables),
+      output: 'no-schema',
+      ...schemaProperties,
+      ...settings(p),
+    } as const
+  }
+}
+
+// ---
+
+function messages<V extends Variables>(config: Schema, variables: V) {
+  const msgs = [] as Message[]
+  for (const msg of config.messages) {
+    msgs.push({
+      role: msg.role,
+      content: templite(msg.content, variables),
+    })
+  }
+  return msgs
+}
+
+function settings(config: Schema) {
+  return {
+    maxOutputTokens: config.modelParameters?.maxTokens,
+    temperature: config.modelParameters?.temperature,
+    topP: config.modelParameters?.topP,
+  } as const
+}
+
+const message = z.discriminatedUnion('role', [
+  z.object({role: z.literal('user'), content: z.string()}),
+  z.object({role: z.literal('system'), content: z.string()}),
+])
+
+type Message = z.infer<typeof message>
+
+const schema = z
+  .object({
+    name: z.string().optional(),
+    description: z.string().optional(),
+    model: z.string(),
+    modelParameters: z
+      .object({
+        maxTokens: z.number().positive().optional(),
+        temperature: z.number().min(0).max(1).optional(),
+        topP: z.number().min(0).max(1).optional(),
+      })
+      .optional(),
+    messages: z.array(message),
+    responseFormat: z.enum(['text', 'json_object', 'json_schema']).optional(),
+    jsonSchema: z.string().optional(),
+  })
+  .refine(
+    data => {
+      if (data.responseFormat === 'json_schema') {
+        return data.jsonSchema !== undefined
+      }
+      return true
+    },
+    {
+      message: "jsonSchema must be provided when responseFormat is 'json_schema'",
+      path: ['jsonSchema'],
+    },
+  )
+
+type Schema = z.infer<typeof schema>
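Two behaviours in this module are worth spelling out: the messages helper fills {{variable}} placeholders via templite, and the zod .refine rejects a json_schema prompt that omits jsonSchema. A minimal illustration of the substitution, assuming templite's two-argument signature:

// Sketch only: how message content is templated before being sent to the model.
import templite from 'templite'

templite('Please explain {{subject}} in as little as 5 sentences', {subject: 'gravity'})
// => 'Please explain gravity in as little as 5 sentences'

// Validation, by contrast, happens up front: schema.parse throws a ZodError
// ("jsonSchema must be provided when responseFormat is 'json_schema'") before any request is made.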

src/provider.ts

Lines changed: 1 addition & 0 deletions
@@ -66,6 +66,7 @@ export function createGitHubModels(options: GitHubModelsProviderOptions = {}): G
     headers: getHeaders,
     fetch: options.fetch,
     includeUsage: true,
+    supportsStructuredOutputs: true,
   } satisfies OpenAICompatibleChatConfig
 
   const createModel = (modelId: GitHubModelsChatModelId) => new OpenAICompatibleChatLanguageModel(modelId, baseOptions)

tsup.config.ts

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@ export default defineConfig([
   {
     entry: {
       index: 'src/index.ts',
+      prompt: 'src/prompt.ts',
     },
     format: ['esm'],
     dts: true,

0 commit comments