Skip to content

Commit b72d848

Browse files
authored
Merge pull request #15 from actions/aiqiaoy/read-prompt-from-file
read prompt from file and print output to file
2 parents c7105a4 + 2aea1d4 commit b72d848

File tree

9 files changed

+196
-43
lines changed

9 files changed

+196
-43
lines changed

.github/workflows/ci.yml

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,3 +67,28 @@ jobs:
6767
- name: Print Output
6868
id: output
6969
run: echo "${{ steps.test-action.outputs.response }}"
70+
71+
test-action-prompt-file:
72+
name: GitHub Actions Test with Prompt File
73+
runs-on: ubuntu-latest
74+
75+
steps:
76+
- name: Checkout
77+
id: checkout
78+
uses: actions/checkout@v4
79+
80+
- name: Create Prompt File
81+
run: echo "hello" > prompt.txt
82+
83+
- name: Test Local Action with Prompt File
84+
id: test-action-prompt-file
85+
uses: ./
86+
with:
87+
prompt-file: prompt.txt
88+
env:
89+
GITHUB_TOKEN: ${{ github.token }}
90+
91+
- name: Print Output
92+
run: |
93+
echo "Response saved to: ${{ steps.test-action-prompt-file.outputs.response-file }}"
94+
cat "${{ steps.test-action-prompt-file.outputs.response-file }}"

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ avoid having to include the `node_modules/` directory in the repository.
4646
1. Make your change, add tests, and make sure the tests still pass:
4747
`npm run test`
4848
1. Make sure your code is correctly formatted: `npm run format`
49-
1. Update `dist/index.js` using `npm run build`. This creates a single
49+
1. Update `dist/index.js` using `npm run bundle`. This creates a single
5050
JavaScript file that is used as an entrypoint for the action
5151
1. Push to your fork and [submit a pull request][pr]
5252
1. Pat yourself on the back and wait for your pull request to be reviewed and

README.md

Lines changed: 36 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,37 @@ jobs:
3434
run: echo "${{ steps.inference.outputs.response }}"
3535
```
3636
37+
### Using a prompt file
38+
39+
You can also provide a prompt file instead of an inline prompt:
40+
41+
```yaml
42+
steps:
43+
- name: Run AI Inference with Prompt File
44+
id: inference
45+
uses: actions/ai-inference@v1
46+
with:
47+
prompt-file: './path/to/prompt.txt'
48+
```
49+
50+
### Read the output from a file
51+
52+
This can be useful when the model response exceeds the GitHub Actions output size limit
53+
54+
```yaml
55+
steps:
56+
- name: Test Local Action
57+
id: inference
58+
uses: actions/ai-inference@v1
59+
with:
60+
prompt: 'Hello!'
61+
62+
- name: Use Response File
63+
run: |
64+
echo "Response saved to: ${{ steps.inference.outputs.response-file }}"
65+
cat "${{ steps.inference.outputs.response-file }}"
66+
```
67+
3768
## Inputs
3869
3970
Various inputs are defined in [`action.yml`](action.yml) to let you configure
@@ -43,6 +74,7 @@ the action:
4374
| --------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
4475
| `token` | Token to use for inference. Typically the GITHUB_TOKEN secret | `github.token` |
4576
| `prompt` | The prompt to send to the model | N/A |
77+
| `prompt-file` | Path to a file containing the prompt. If both `prompt` and `prompt-file` are provided, `prompt-file` takes precedence | `""` |
4678
| `system-prompt` | The system prompt to send to the model | `""` |
4779
| `model` | The model to use for inference. Must be available in the [GitHub Models](https://github.com/marketplace?type=models) catalog | `gpt-4o` |
4880
| `endpoint` | The endpoint to use for inference. If you're running this as part of an org, you should probably use the org-specific Models endpoint | `https://models.github.ai/inference` |
@@ -52,9 +84,10 @@ the action:
5284

5385
The AI inference action provides the following outputs:
5486

55-
| Name | Description |
56-
| ---------- | --------------------------- |
57-
| `response` | The response from the model |
87+
| Name | Description |
88+
| --------------- | ----------------------------------------------------------------------- |
89+
| `response` | The response from the model |
90+
| `response-file` | The file path where the response is saved (useful for larger responses) |
5891

5992
## Required Permissions
6093

__tests__/main.test.ts

Lines changed: 46 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
*/
88
import { jest } from '@jest/globals'
99
import * as core from '../__fixtures__/core.js'
10-
1110
const mockPost = jest.fn().mockImplementation(() => ({
1211
body: {
1312
choices: [
@@ -29,44 +28,81 @@ jest.unstable_mockModule('@azure-rest/ai-inference', () => ({
2928
isUnexpected: jest.fn(() => false)
3029
}))
3130

31+
const mockExistsSync = jest.fn().mockReturnValue(true)
32+
const mockReadFileSync = jest.fn().mockReturnValue('Hello, AI!')
33+
34+
jest.unstable_mockModule('fs', () => ({
35+
existsSync: mockExistsSync,
36+
readFileSync: mockReadFileSync
37+
}))
38+
3239
jest.unstable_mockModule('@actions/core', () => core)
3340

3441
// The module being tested should be imported dynamically. This ensures that the
3542
// mocks are used in place of any actual dependencies.
3643
const { run } = await import('../src/main.js')
3744

3845
describe('main.ts', () => {
39-
beforeEach(() => {
46+
it('Sets the response output', async () => {
4047
// Set the action's inputs as return values from core.getInput().
4148
core.getInput.mockImplementation((name) => {
4249
if (name === 'prompt') return 'Hello, AI!'
4350
if (name === 'system_prompt') return 'You are a test assistant.'
4451
if (name === 'model_name') return 'gpt-4o'
4552
return ''
4653
})
47-
})
48-
49-
afterEach(() => {
50-
jest.resetAllMocks()
51-
})
5254

53-
it('Sets the response output', async () => {
5455
await run()
5556

5657
expect(core.setOutput).toHaveBeenNthCalledWith(
5758
1,
5859
'response',
5960
'Hello, user!'
6061
)
62+
63+
expect(core.setOutput).toHaveBeenNthCalledWith(
64+
2,
65+
'response-file',
66+
expect.stringContaining('modelResponse.txt')
67+
)
6168
})
6269

6370
it('Sets a failed status', async () => {
64-
// Clear the getInput mock and return an empty prompt
65-
core.getInput.mockClear().mockReturnValueOnce('')
71+
// Clear the getInput mock and simulate no prompt or prompt-file input
72+
core.getInput.mockImplementation((name) => {
73+
if (name === 'prompt') return ''
74+
if (name === 'prompt_file') return ''
75+
return ''
76+
})
6677

6778
await run()
6879

6980
// Verify that the action was marked as failed.
7081
expect(core.setFailed).toHaveBeenNthCalledWith(1, 'prompt is not set')
7182
})
83+
84+
it('uses prompt-file', async () => {
85+
const promptFile = 'prompt.txt'
86+
core.getInput.mockImplementation((name) => {
87+
if (name === 'prompt-file') return promptFile
88+
if (name === 'system-prompt') return 'You are a test assistant.'
89+
if (name === 'model-name') return 'gpt-4o'
90+
return ''
91+
})
92+
93+
await run()
94+
95+
expect(mockExistsSync).toHaveBeenCalledWith(promptFile)
96+
expect(mockReadFileSync).toHaveBeenCalledWith(promptFile, 'utf-8')
97+
expect(core.setOutput).toHaveBeenNthCalledWith(
98+
1,
99+
'response',
100+
'Hello, user!'
101+
)
102+
expect(core.setOutput).toHaveBeenNthCalledWith(
103+
2,
104+
'response-file',
105+
expect.stringContaining('modelResponse.txt')
106+
)
107+
})
72108
})

action.yml

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,11 @@ branding:
1111
inputs:
1212
prompt:
1313
description: The prompt for the model
14-
required: true
14+
required: false
15+
default: ''
16+
prompt-file:
17+
description: Path to a file containing the prompt
18+
required: false
1519
default: ''
1620
model:
1721
description: The model to use
@@ -38,6 +42,8 @@ inputs:
3842
outputs:
3943
response:
4044
description: The response from the model
45+
response-file:
46+
description: The file path where the response is saved
4147

4248
runs:
4349
using: node20

0 commit comments

Comments
 (0)