Skip to content

Commit 43f6a38

Browse files
committed
read prompt from file and print output to file
1 parent c7105a4 commit 43f6a38

File tree

8 files changed

+123
-34
lines changed

8 files changed

+123
-34
lines changed

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ avoid having to include the `node_modules/` directory in the repository.
4646
1. Make your change, add tests, and make sure the tests still pass:
4747
`npm run test`
4848
1. Make sure your code is correctly formatted: `npm run format`
49-
1. Update `dist/index.js` using `npm run build`. This creates a single
49+
1. Update `dist/index.js` using `npm run bundle`. This creates a single
5050
JavaScript file that is used as an entrypoint for the action
5151
1. Push to your fork and [submit a pull request][pr]
5252
1. Pat yourself on the back and wait for your pull request to be reviewed and

README.md

Lines changed: 29 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,30 @@ jobs:
3434
run: echo "${{ steps.inference.outputs.response }}"
3535
```
3636
37+
### Using a Prompt File
38+
39+
You can also provide a prompt file instead of an inline prompt:
40+
41+
```yaml
42+
steps:
43+
- name: Checkout repository
44+
uses: actions/checkout@v4
45+
46+
- name: Run AI Inference with Prompt File
47+
id: inference
48+
uses: actions/ai-inference@v1
49+
with:
50+
prompt-file: './path/to/prompt.txt'
51+
52+
- name: Use Response File
53+
run: |
54+
echo "Response saved to: ${{ steps.inference.outputs.response-path }}"
55+
cat "${{ steps.inference.outputs.response-path }}"
56+
```
57+
58+
This is particularly useful for longer prompts or when you need to use the file
59+
path where the response is stored.
60+
3761
## Inputs
3862
3963
Various inputs are defined in [`action.yml`](action.yml) to let you configure
@@ -43,6 +67,7 @@ the action:
4367
| --------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
4468
| `token` | Token to use for inference. Typically the GITHUB_TOKEN secret | `github.token` |
4569
| `prompt` | The prompt to send to the model | N/A |
70+
| `prompt-file` | Path to a file containing the prompt. If both `prompt` and `prompt-file` are provided, `prompt-file` takes precedence | `""` |
4671
| `system-prompt` | The system prompt to send to the model | `""` |
4772
| `model` | The model to use for inference. Must be available in the [GitHub Models](https://github.com/marketplace?type=models) catalog | `gpt-4o` |
4873
| `endpoint` | The endpoint to use for inference. If you're running this as part of an org, you should probably use the org-specific Models endpoint | `https://models.github.ai/inference` |
@@ -52,9 +77,10 @@ the action:
5277

5378
The AI inference action provides the following outputs:
5479

55-
| Name | Description |
56-
| ---------- | --------------------------- |
57-
| `response` | The response from the model |
80+
| Name | Description |
81+
| --------------- | ----------------------------------------------------------------------- |
82+
| `response` | The response from the model |
83+
| `response-path` | The file path where the response is saved (useful for larger responses) |
5884

5985
## Required Permissions
6086

__tests__/main.test.ts

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
*/
88
import { jest } from '@jest/globals'
99
import * as core from '../__fixtures__/core.js'
10-
1110
const mockPost = jest.fn().mockImplementation(() => ({
1211
body: {
1312
choices: [
@@ -58,11 +57,21 @@ describe('main.ts', () => {
5857
'response',
5958
'Hello, user!'
6059
)
60+
61+
expect(core.setOutput).toHaveBeenNthCalledWith(
62+
2,
63+
'response-path',
64+
expect.stringContaining('modelResponse.txt')
65+
)
6166
})
6267

6368
it('Sets a failed status', async () => {
64-
// Clear the getInput mock and return an empty prompt
65-
core.getInput.mockClear().mockReturnValueOnce('')
69+
// Clear the getInput mock and simulate no prompt or prompt-file input
70+
core.getInput.mockImplementation((name) => {
71+
if (name === 'prompt') return ''
72+
if (name === 'prompt-file') return ''
73+
return ''
74+
})
6675

6776
await run()
6877

action.yml

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,11 @@ branding:
1111
inputs:
1212
prompt:
1313
description: The prompt for the model
14-
required: true
14+
required: false
15+
default: ''
16+
prompt-file:
17+
description: Path to a file containing the prompt
18+
required: false
1519
default: ''
1620
model:
1721
description: The model to use
@@ -38,6 +42,8 @@ inputs:
3842
outputs:
3943
response:
4044
description: The response from the model
45+
response-path:
46+
description: The file path where the response is saved
4147

4248
runs:
4349
using: node20

dist/index.js

Lines changed: 43 additions & 22 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

dist/index.js.map

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package-lock.json

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/main.ts

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,11 @@
11
import * as core from '@actions/core'
22
import ModelClient, { isUnexpected } from '@azure-rest/ai-inference'
33
import { AzureKeyCredential } from '@azure/core-auth'
4+
import * as fs from 'fs'
5+
import * as os from 'os'
6+
import * as path from 'path'
7+
8+
const RESPONSE_FILE = 'modelResponse.txt'
49

510
/**
611
* The main function for the action.
@@ -9,7 +14,16 @@ import { AzureKeyCredential } from '@azure/core-auth'
914
*/
1015
export async function run(): Promise<void> {
1116
try {
12-
const prompt: string = core.getInput('prompt')
17+
const promptFile: string = core.getInput('prompt-file')
18+
let prompt: string = core.getInput('prompt')
19+
20+
if (promptFile !== undefined && promptFile !== '') {
21+
if (!fs.existsSync(promptFile)) {
22+
throw new Error(`Prompt file not found: ${promptFile}`)
23+
}
24+
prompt = fs.readFileSync(promptFile, 'utf-8')
25+
}
26+
1327
if (prompt === undefined || prompt === '') {
1428
throw new Error('prompt is not set')
1529
}
@@ -60,6 +74,14 @@ export async function run(): Promise<void> {
6074

6175
// Set outputs for other workflow steps to use
6276
core.setOutput('response', modelResponse || '')
77+
78+
// Save the response to a file in case the response overflows the output limit
79+
const responseFilePath = path.join(tempDir(), RESPONSE_FILE)
80+
core.setOutput('response-path', responseFilePath)
81+
82+
if (modelResponse && modelResponse !== '') {
83+
fs.writeFileSync(responseFilePath, modelResponse, 'utf-8')
84+
}
6385
} catch (error) {
6486
// Fail the workflow run if an error occurs
6587
if (error instanceof Error) {
@@ -69,3 +91,8 @@ export async function run(): Promise<void> {
6991
}
7092
}
7193
}
94+
95+
function tempDir(): string {
96+
const tempDirectory = process.env['RUNNER_TEMP'] || os.tmpdir()
97+
return tempDirectory
98+
}

0 commit comments

Comments
 (0)