Commit 8f64ac1

Fixup types and tests

Parent: 1f89e94

4 files changed, +72 -46 lines:
- __tests__/main-prompt-integration.test.ts
- src/helpers.ts
- src/inference.ts
- src/main.ts


__tests__/main-prompt-integration.test.ts
47 additions & 38 deletions
@@ -1,31 +1,42 @@
 import { describe, it, expect, beforeEach, jest } from '@jest/globals'
-import * as core from '@actions/core'
-import * as fs from 'fs'
-import * as path from 'path'
-import { fileURLToPath } from 'url'
-import { run } from '../src/main'
-
-const __filename = fileURLToPath(import.meta.url)
-const __dirname = path.dirname(__filename)
-
-// Mock the action toolkit functions
-jest.mock('@actions/core')
-
-// Mock fs to handle temporary file creation
-jest.mock('fs')
+import * as core from '../__fixtures__/core.js'
+
+// Create fs mocks
+const mockExistsSync = jest.fn()
+const mockReadFileSync = jest.fn()
+const mockWriteFileSync = jest.fn()
+
+// Create inference mocks
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+const mockSimpleInference = jest.fn() as jest.MockedFunction<any>
+const mockMcpInference = jest.fn()
+
+// Create MCP mocks
+const mockConnectToGitHubMCP = jest.fn()
+
+// Mock fs module
+jest.unstable_mockModule('fs', () => ({
+  existsSync: mockExistsSync,
+  readFileSync: mockReadFileSync,
+  writeFileSync: mockWriteFileSync
+}))

 // Mock the inference functions
-jest.mock('../src/inference', () => ({
-  simpleInference: jest.fn(),
-  mcpInference: jest.fn()
+jest.unstable_mockModule('../src/inference.js', () => ({
+  simpleInference: mockSimpleInference,
+  mcpInference: mockMcpInference
 }))

 // Mock the MCP connection
-jest.mock('../src/mcp', () => ({
-  connectToGitHubMCP: jest.fn()
+jest.unstable_mockModule('../src/mcp.js', () => ({
+  connectToGitHubMCP: mockConnectToGitHubMCP
 }))

-import { simpleInference } from '../src/inference'
+jest.unstable_mockModule('@actions/core', () => core)
+
+// The module being tested should be imported dynamically. This ensures that the
+// mocks are used in place of any actual dependencies.
+const { run } = await import('../src/main.js')

 describe('main.ts - prompt.yml integration', () => {
   beforeEach(() => {
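
The move from jest.mock to jest.unstable_mockModule is what makes this suite work under native ESM: module mocks must be registered before the module under test is resolved, which is why the static import of ../src/main is replaced by a top-level await import('../src/main.js'). A minimal sketch of the pattern, using a hypothetical ../src/greeter.js dependency:

import { it, expect, jest } from '@jest/globals'

// Register the mock first; unlike jest.mock, unstable_mockModule calls are
// not hoisted, so ordering matters.
const mockGreet = jest.fn()
jest.unstable_mockModule('../src/greeter.js', () => ({
  greet: mockGreet
}))

// Import the module under test only after the mock is registered, so its
// own import of './greeter.js' resolves to the mock.
const { run } = await import('../src/main.js')

it('calls the mocked dependency', async () => {
  mockGreet.mockReturnValue('hello from mock')
  await run()
  expect(mockGreet).toHaveBeenCalled()
})
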
@@ -35,8 +46,7 @@ describe('main.ts - prompt.yml integration', () => {
     process.env['GITHUB_TOKEN'] = 'test-token'

     // Mock core.getInput to return appropriate values
-    const mockGetInput = core.getInput as jest.Mock
-    mockGetInput.mockImplementation((name: string) => {
+    core.getInput.mockImplementation((name: string) => {
       switch (name) {
         case 'model':
           return 'openai/gpt-4o'
@@ -55,12 +65,7 @@ describe('main.ts - prompt.yml integration', () => {
     const mockGetBooleanInput = core.getBooleanInput as jest.Mock
     mockGetBooleanInput.mockReturnValue(false)

-    // Mock fs.existsSync
-    const mockExistsSync = fs.existsSync as jest.Mock
-    mockExistsSync.mockReturnValue(true)
-
     // Mock fs.readFileSync for prompt file
-    const mockReadFileSync = fs.readFileSync as jest.Mock
     mockReadFileSync.mockReturnValue(`
 messages:
   - role: system
@@ -71,17 +76,15 @@ model: openai/gpt-4o
 `)

     // Mock fs.writeFileSync
-    const mockWriteFileSync = fs.writeFileSync as jest.Mock
     mockWriteFileSync.mockImplementation(() => {})

     // Mock simpleInference
-    const mockSimpleInference = simpleInference as jest.Mock
     mockSimpleInference.mockResolvedValue('Mocked AI response')
   })

   it('should handle prompt YAML files with template variables', async () => {
-    const mockGetInput = core.getInput as jest.Mock
-    mockGetInput.mockImplementation((name: string) => {
+    mockExistsSync.mockReturnValue(true)
+    core.getInput.mockImplementation((name: string) => {
       switch (name) {
         case 'prompt-file':
           return 'test.prompt.yml'
@@ -103,7 +106,6 @@ model: openai/gpt-4o
     await run()

     // Verify simpleInference was called with the correct message structure
-    const mockSimpleInference = simpleInference as jest.Mock
     expect(mockSimpleInference).toHaveBeenCalledWith(
       expect.objectContaining({
         messages: [
@@ -135,8 +137,8 @@ model: openai/gpt-4o
   })

   it('should fall back to legacy format when not using prompt YAML', async () => {
-    const mockGetInput = core.getInput as jest.Mock
-    mockGetInput.mockImplementation((name: string) => {
+    mockExistsSync.mockReturnValue(false)
+    core.getInput.mockImplementation((name: string) => {
       switch (name) {
         case 'prompt':
           return 'Hello, world!'
@@ -157,12 +159,19 @@ model: openai/gpt-4o

     await run()

-    // Verify simpleInference was called with legacy format
-    const mockSimpleInference = simpleInference as jest.Mock
+    // Verify simpleInference was called with converted message format
     expect(mockSimpleInference).toHaveBeenCalledWith(
       expect.objectContaining({
-        systemPrompt: 'You are helpful',
-        prompt: 'Hello, world!',
+        messages: [
+          {
+            role: 'system',
+            content: 'You are helpful'
+          },
+          {
+            role: 'user',
+            content: 'Hello, world!'
+          }
+        ],
         modelName: 'openai/gpt-4o',
         maxTokens: 200,
         endpoint: 'https://models.github.ai/inference',
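
The final hunk tracks an interface change in the action itself: legacy system-prompt/prompt inputs are now converted into the same messages array that prompt.yml files produce before reaching simpleInference. A hedged sketch of that conversion follows; the real logic presumably lives in buildMessages in src/helpers.ts, and this function name and signature are illustrative only:

function toMessages(
  systemPrompt: string | undefined,
  prompt: string
): Array<{ role: string; content: string }> {
  const messages: Array<{ role: string; content: string }> = []
  // The system message is optional; the user prompt always comes last.
  if (systemPrompt) {
    messages.push({ role: 'system', content: systemPrompt })
  }
  messages.push({ role: 'user', content: prompt })
  return messages
}

// toMessages('You are helpful', 'Hello, world!') yields exactly the two
// messages asserted in the test above.
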

src/helpers.ts
3 additions & 1 deletion
@@ -96,7 +96,9 @@ export function buildMessages(
 /**
  * Build response format object for API from prompt config
  */
-export function buildResponseFormat(promptConfig?: PromptConfig): any {
+export function buildResponseFormat(
+  promptConfig?: PromptConfig
+): { type: 'json_schema'; json_schema: unknown } | undefined {
   if (
     promptConfig?.responseFormat === 'json_schema' &&
     promptConfig.jsonSchema
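
Narrowing the return type from any to a union makes both outcomes explicit to callers. A sketch of what the signature implies, assuming PromptConfig carries the two fields used in the condition; the construction of the returned object is illustrative, and the real body lives in src/helpers.ts:

interface PromptConfig {
  responseFormat?: string
  jsonSchema?: unknown
}

function buildResponseFormat(
  promptConfig?: PromptConfig
): { type: 'json_schema'; json_schema: unknown } | undefined {
  if (
    promptConfig?.responseFormat === 'json_schema' &&
    promptConfig.jsonSchema
  ) {
    // json_schema stays unknown: it is passed through to the API verbatim.
    return { type: 'json_schema', json_schema: promptConfig.jsonSchema }
  }
  // Returning undefined (rather than null or {}) means callers can simply
  // skip setting the optional response_format field.
  return undefined
}
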

src/inference.ts
19 additions & 5 deletions
@@ -1,16 +1,30 @@
 import * as core from '@actions/core'
 import ModelClient, { isUnexpected } from '@azure-rest/ai-inference'
 import { AzureKeyCredential } from '@azure/core-auth'
-import { GitHubMCPClient, executeToolCalls } from './mcp.js'
+import { GitHubMCPClient, executeToolCalls, MCPTool, ToolCall } from './mcp.js'
 import { handleUnexpectedResponse } from './helpers.js'

+interface ChatMessage {
+  role: string
+  content: string | null
+  tool_calls?: ToolCall[]
+}
+
+interface ChatCompletionsRequestBody {
+  messages: ChatMessage[]
+  max_tokens: number
+  model: string
+  response_format?: { type: 'json_schema'; json_schema: unknown }
+  tools?: MCPTool[]
+}
+
 export interface InferenceRequest {
   messages: Array<{ role: string; content: string }>
   modelName: string
   maxTokens: number
   endpoint: string
   token: string
-  responseFormat?: any // Will contain the processed response format for the API
+  responseFormat?: { type: 'json_schema'; json_schema: unknown } // Processed response format for the API
 }

 export interface InferenceResponse {
@@ -41,7 +55,7 @@ export async function simpleInference(
     }
   )

-  const requestBody: any = {
+  const requestBody: ChatCompletionsRequestBody = {
     messages: request.messages,
     max_tokens: request.maxTokens,
     model: request.modelName
@@ -84,7 +98,7 @@ export async function mcpInference(
   )

   // Start with the pre-processed messages
-  const messages: ChatMessage[] = [...request.messages]
+  const messages: ChatMessage[] = [...request.messages]

   let iterationCount = 0
   const maxIterations = 5 // Prevent infinite loops
@@ -93,7 +107,7 @@
     iterationCount++
     core.info(`MCP inference iteration ${iterationCount}`)

-    const requestBody: any = {
+    const requestBody: ChatCompletionsRequestBody = {
       messages: messages,
       max_tokens: request.maxTokens,
       model: request.modelName,
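
With the request body typed, optional capabilities (structured output, MCP tools) are attached only when present instead of living on an untyped object. A minimal, self-contained sketch of the pattern; the interfaces are module-private in src/inference.ts, so they are re-declared here for illustration:

interface ChatMessage {
  role: string
  content: string | null
}

interface ChatCompletionsRequestBody {
  messages: ChatMessage[]
  max_tokens: number
  model: string
  response_format?: { type: 'json_schema'; json_schema: unknown }
}

// Stand-in for the value produced by buildResponseFormat.
declare const responseFormat:
  | { type: 'json_schema'; json_schema: unknown }
  | undefined

const requestBody: ChatCompletionsRequestBody = {
  messages: [{ role: 'user', content: 'Hello, world!' }],
  max_tokens: 200,
  model: 'openai/gpt-4o'
}

// Optional fields are set only when configured; a typo in a field name is
// now a compile-time error rather than a silently ignored property.
if (responseFormat) {
  requestBody.response_format = responseFormat
}
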

src/main.ts
3 additions & 2 deletions
@@ -8,7 +8,8 @@ import { loadContentFromFileOrInput, buildInferenceRequest } from './helpers.js'
 import {
   loadPromptFile,
   parseTemplateVariables,
-  isPromptYamlFile
+  isPromptYamlFile,
+  PromptConfig
 } from './prompt.js'

 const RESPONSE_FILE = 'modelResponse.txt'
@@ -23,7 +24,7 @@ export async function run(): Promise<void> {
   const promptFilePath = core.getInput('prompt-file')
   const inputVariables = core.getInput('input')

-  let promptConfig: any = undefined
+  let promptConfig: PromptConfig | undefined = undefined
   let systemPrompt: string | undefined = undefined
   let prompt: string | undefined = undefined
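
Typing promptConfig as PromptConfig | undefined rather than any moves the null-handling to the compiler. Illustrative only; loadPromptFile's exact parameters are assumed:

import { loadPromptFile, isPromptYamlFile, PromptConfig } from './prompt.js'

declare const promptFilePath: string

// Starts undefined and is only populated on the prompt.yml path.
let promptConfig: PromptConfig | undefined = undefined
if (isPromptYamlFile(promptFilePath)) {
  promptConfig = loadPromptFile(promptFilePath)
}

// Downstream consumers such as buildResponseFormat(promptConfig) now receive
// a checked value, and any misspelled field access fails to compile.
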
