Commit 4430f6b

feat: Adds in prompt.yml support
1 parent ab1ee5a commit 4430f6b

File tree

11 files changed: +413 -27 lines changed

examples/ai/generate-text.ts

Lines changed: 9 additions & 13 deletions

```diff
@@ -2,18 +2,14 @@ import {githubModels} from '@github/models'
 import {generateText} from 'ai'
 import 'dotenv/config'
 
-async function main() {
-  const result = await generateText({
-    model: githubModels('openai/gpt-4o'),
-    prompt: 'I want 100 words on how to inflate a balloon.',
-  })
+const result = await generateText({
+  model: githubModels('openai/gpt-4o'),
+  prompt: 'I want 100 words on how to inflate a balloon.',
+})
 
-  console.log('Text:')
-  console.log(result.text)
-  console.log()
+console.log('Text:')
+console.log(result.text)
+console.log()
 
-  console.log('Token usage:', result.usage)
-  console.log('Finish reason:', result.finishReason)
-}
-
-main().catch(console.error)
+console.log('Token usage:', result.usage)
+console.log('Finish reason:', result.finishReason)
```

examples/ai/package.json

Lines changed: 3 additions & 2 deletions

```diff
@@ -7,8 +7,9 @@
     "node": ">=24"
   },
   "dependencies": {
+    "@github/models": "file:../../",
     "ai": "beta",
-    "dotenv": "^17",
-    "@github/models": "file:../../"
+    "confbox": "^0.2",
+    "dotenv": "^17"
   }
 }
```

examples/ai/prompt.ts

Lines changed: 21 additions & 0 deletions

```diff
@@ -0,0 +1,21 @@
+import {readFile} from 'node:fs/promises'
+import {parseYAML} from 'confbox/yaml'
+import {createObjectPrompt} from '@github/models/prompt'
+import {generateObject} from 'ai'
+import 'dotenv/config'
+
+const promptFile = parseYAML(await readFile('./teacher.prompt.yml', 'utf8'))
+const prompt = createObjectPrompt(promptFile)
+
+const result = await generateObject(
+  prompt({
+    subject: 'balloon popping',
+  }),
+)
+
+console.log('Object:')
+console.log(result.object)
+console.log()
+
+console.log('Token usage:', result.usage)
+console.log('Finish reason:', result.finishReason)
```
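For orientation, the example above implies a small contract: `createObjectPrompt(parsedYaml)` returns a function that maps template variables to the options object handed to `generateObject`. The sketch below is a hypothetical reading of that contract, not the code shipped in `dist/prompt.js`; it assumes `templite` handles the `{{subject}}`-style substitution and the AI SDK's `jsonSchema()` helper wires up the schema, which the dependencies added in `package.json` suggest but do not prove, and `sketchObjectPrompt` is an invented name used only for illustration.

```ts
// Hypothetical sketch only; the actual createObjectPrompt may differ.
import {githubModels} from '@github/models'
import {jsonSchema} from 'ai'
import templite from 'templite'

interface PromptFile {
  model: string
  jsonSchema: string
  messages: Array<{role: 'system' | 'user' | 'assistant'; content: string}>
}

function sketchObjectPrompt(config: PromptFile) {
  return (variables: Record<string, string>) => ({
    // Resolve the model id from the YAML, e.g. 'openai/gpt-4.1'.
    model: githubModels(config.model),
    // The prompt.yml stores the JSON schema as a string; parse it and pass
    // the inner "schema" object to the AI SDK's jsonSchema() helper.
    schema: jsonSchema(JSON.parse(config.jsonSchema).schema),
    // Fill {{subject}}-style placeholders from the caller's variables.
    messages: config.messages.map(message => ({
      ...message,
      content: templite(message.content, variables),
    })),
  })
}
```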

examples/ai/teacher.prompt.yml

Lines changed: 26 additions & 0 deletions

```diff
@@ -0,0 +1,26 @@
+name: teacher
+model: openai/gpt-4.1
+responseFormat: json_schema
+jsonSchema: |-
+  {
+    "name": "explanation",
+    "strict": true,
+    "schema": {
+      "type": "object",
+      "properties": {
+        "lesson": {
+          "type": "string",
+          "description": "The lesson about the subject"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "lesson"
+      ]
+    }
+  }
+messages:
+  - role: system
+    content: You're an elementary school teacher who loves to make learning fun.
+  - role: user
+    content: Please explain {{subject}} in as little as 5 sentences and give it to be as json.
```

package-lock.json

Lines changed: 23 additions & 9 deletions
Generated file; diff not rendered by default.

package.json

Lines changed: 10 additions & 3 deletions

```diff
@@ -14,6 +14,10 @@
       "types": "./dist/index.d.ts",
       "default": "./dist/index.js"
     },
+    "./prompt": {
+      "types": "./dist/prompt.d.ts",
+      "default": "./dist/prompt.js"
+    },
     "./package.json": "./package.json"
   },
   "scripts": {
@@ -39,19 +43,22 @@
   "dependencies": {
     "@ai-sdk/openai-compatible": "beta",
     "@ai-sdk/provider": "beta",
-    "@ai-sdk/provider-utils": "beta"
+    "@ai-sdk/provider-utils": "beta",
+    "templite": "^1.2.0",
+    "tiny-invariant": "^1.3.3",
+    "zod": "^3 || ^4"
   },
   "devDependencies": {
     "@github/prettier-config": "0.0.6",
     "@types/node": "^24",
+    "dotenv": "^17.2.0",
     "eslint": "^9",
     "eslint-plugin-github": "^6",
     "eslint-plugin-simple-import-sort": "12.1.1",
     "prettier": "^3",
     "tsup": "^8.3.0",
     "typescript": "^5.7.2",
-    "vitest": "^3",
-    "dotenv": "^17.2.0"
+    "vitest": "^3"
   },
   "engines": {
     "node": ">=20"
```

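The new `./prompt` entry in the exports map is what makes the bare specifier used in the example and the readme resolvable: TypeScript tooling follows the `types` condition while Node loads `default` at runtime. A minimal consumer-side illustration, with the two names taken from the readme and `examples/ai/prompt.ts` in this commit:

```ts
// Resolves via the "./prompt" conditional export added above:
// TypeScript sees ./dist/prompt.d.ts, Node loads ./dist/prompt.js.
import {createObjectPrompt, createTextPrompt} from '@github/models/prompt'
```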
readme.md

Lines changed: 98 additions & 0 deletions

````diff
@@ -67,6 +67,104 @@ const githubModels = createGithubModels({
 You can use it as a middleware to intercept requests,
 or to provide a custom fetch implementation for e.g. testing.
 
+## Prompt Management
+
+The `@github/models/prompt` sub-module provides a powerful way to integrate with AI SDK methods like `generateText` and `generateObject` using your `prompt.yml` files.
+Prompt YAML files are designed to create reusable artifacts that integrate with GitHub's suite of AI tools. Check out the [Models tab](https://github.com/github/models-ai-sdk/models).
+
+### Example
+
+Create a `.prompt.yml` file:
+
+```yaml
+name: teacher
+description: An elementary school teacher who explains concepts simply
+model: openai/gpt-4o
+modelParameters:
+  temperature: 0.7
+  maxTokens: 500
+messages:
+  - role: system
+    content: You're an elementary school teacher who loves making learning fun.
+  - role: user
+    content: Explain {{subject}} in exactly {{sentences}} sentences for a 10-year-old.
+```
+
+Use `createTextPrompt` for text-based responses:
+
+```ts
+import {readFile} from 'node:fs/promises'
+import {parseYAML} from 'confbox/yaml'
+import {createTextPrompt} from '@github/models/prompt'
+import {generateText} from 'ai'
+
+const config = parseYAML(await readFile('./teacher.prompt.yml', 'utf8'))
+const prompt = createTextPrompt(config)
+
+const result = await generateText(
+  prompt({
+    subject: 'photosynthesis',
+    sentences: '5',
+  }),
+)
+
+console.log(result.text)
+```
+
+<details><summary>Structured Generation</summary>
+
+Use `createObjectPrompt` for JSON responses with schema validation:
+
+```yaml
+name: recipe-generator
+model: openai/gpt-4o
+responseFormat: json_schema
+jsonSchema: |-
+  {
+    "name": "recipe",
+    "strict": true,
+    "schema": {
+      "type": "object",
+      "properties": {
+        "title": {"type": "string"},
+        "ingredients": {
+          "type": "array",
+          "items": {"type": "string"}
+        },
+        "instructions": {
+          "type": "array",
+          "items": {"type": "string"}
+        }
+      },
+      "required": ["title", "ingredients", "instructions"],
+      "additionalProperties": false
+    }
+  }
+messages:
+  - role: user
+    content: Create a recipe for {{dish}} that serves {{servings}} people.
+```
+
+```ts
+import {createObjectPrompt} from '@github/models/prompt'
+import {generateObject} from 'ai'
+
+const config = parseYAML(await readFile('./recipe.prompt.yml', 'utf8'))
+const prompt = createObjectPrompt(config)
+
+const result = await generateObject(
+  prompt({
+    dish: 'chocolate chip cookies',
+    servings: '4',
+  }),
+)
+
+console.log(result.object.title)
+console.log(result.object.ingredients)
+```
+
+</details>
+
 ## API Reference
 
 The GitHub Models provider uses the [GitHub Models Inference API](https://docs.github.com/en/rest/models/inference?apiVersion=2022-11-28#run-an-inference-request).
````
