
Commit ff5ce3f

Copy src from ai-inference
1 parent 30d54fc commit ff5ce3f

6 files changed: +691 −0 lines changed


src/helpers.ts

Lines changed: 144 additions & 0 deletions
import * as core from '@actions/core'
import { GetChatCompletionsDefaultResponse } from '@azure-rest/ai-inference'
import * as fs from 'fs'
import { PromptConfig } from './prompt.js'
import { InferenceRequest } from './inference.js'

/**
 * Helper function to load content from a file or use fallback input
 * @param filePathInput - Input name for the file path
 * @param contentInput - Input name for the direct content
 * @param defaultValue - Default value to use if neither file nor content is provided
 * @returns The loaded content
 */
export function loadContentFromFileOrInput(
  filePathInput: string,
  contentInput: string,
  defaultValue?: string
): string {
  const filePath = core.getInput(filePathInput)
  const contentString = core.getInput(contentInput)

  if (filePath !== undefined && filePath !== '') {
    if (!fs.existsSync(filePath)) {
      throw new Error(`File for ${filePathInput} was not found: ${filePath}`)
    }
    return fs.readFileSync(filePath, 'utf-8')
  } else if (contentString !== undefined && contentString !== '') {
    return contentString
  } else if (defaultValue !== undefined) {
    return defaultValue
  } else {
    throw new Error(`Neither ${filePathInput} nor ${contentInput} was set`)
  }
}
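
A typical call site, where the input names are illustrative rather than defined by this file:

// Prefer a file input, fall back to an inline input, then to a default.
const systemPrompt = loadContentFromFileOrInput(
  'system-prompt-file', // hypothetical input naming a file path
  'system-prompt', // hypothetical input carrying the text directly
  'You are a helpful assistant'
)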

/**
 * Helper function to handle unexpected responses from the AI service
 * @param response - The response object from the AI service
 * @throws Error with an appropriate message based on the response content
 */
export function handleUnexpectedResponse(
  response: GetChatCompletionsDefaultResponse
): never {
  // Extract x-ms-error-code from headers if available
  const errorCode = response.headers['x-ms-error-code']
  const errorCodeMsg = errorCode ? ` (error code: ${errorCode})` : ''

  // Check if response body exists and contains error details
  if (response.body && response.body.error) {
    throw response.body.error
  }

  // Handle case where response body is missing
  if (!response.body) {
    throw new Error(
      `Failed to get response from AI service (status: ${response.status})${errorCodeMsg}. ` +
        'Please check network connection and endpoint configuration.'
    )
  }

  // Handle other error cases
  throw new Error(
    `AI service returned error response (status: ${response.status})${errorCodeMsg}: ` +
      (typeof response.body === 'string'
        ? response.body
        : JSON.stringify(response.body))
  )
}
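
This helper is written to pair with the SDK's isUnexpected type guard; because it returns never, the compiler narrows the response to the success type after the guard. The pattern, as src/inference.ts below uses it (client and requestBody as set up there):

const response = await client.path('/chat/completions').post({ body: requestBody })
if (isUnexpected(response)) {
  handleUnexpectedResponse(response) // returns never; code below sees the success type
}
const content = response.body.choices[0].message.content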

/**
 * Build messages array from either prompt config or legacy format
 */
export function buildMessages(
  promptConfig?: PromptConfig,
  systemPrompt?: string,
  prompt?: string
): Array<{ role: string; content: string }> {
  if (promptConfig?.messages && promptConfig.messages.length > 0) {
    // Use new message format
    return promptConfig.messages.map((msg) => ({
      role: msg.role,
      content: msg.content
    }))
  } else {
    // Use legacy format
    return [
      {
        role: 'system',
        content: systemPrompt || 'You are a helpful assistant'
      },
      { role: 'user', content: prompt || '' }
    ]
  }
}
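
Both branches in miniature. PromptConfig is defined in ./prompt.js and not shown in this commit, so the cast below assumes only that it carries a messages array:

// Prompt-config path: messages are passed through.
buildMessages({ messages: [{ role: 'user', content: 'Hi' }] } as PromptConfig)
// => [{ role: 'user', content: 'Hi' }]

// Legacy path: system + user, with a default system prompt.
buildMessages(undefined, undefined, 'Hi')
// => [{ role: 'system', content: 'You are a helpful assistant' },
//     { role: 'user', content: 'Hi' }]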

/**
 * Build response format object for API from prompt config
 */
export function buildResponseFormat(
  promptConfig?: PromptConfig
): { type: 'json_schema'; json_schema: unknown } | undefined {
  if (
    promptConfig?.responseFormat === 'json_schema' &&
    promptConfig.jsonSchema
  ) {
    try {
      const schema = JSON.parse(promptConfig.jsonSchema)
      return {
        type: 'json_schema',
        json_schema: schema
      }
    } catch (error) {
      throw new Error(
        `Invalid JSON schema: ${error instanceof Error ? error.message : 'Unknown error'}`
      )
    }
  }
  return undefined
}
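
For instance, with a serialized schema (jsonSchema is a string, since the function parses it; the schema itself is illustrative):

const format = buildResponseFormat({
  responseFormat: 'json_schema',
  jsonSchema: JSON.stringify({
    name: 'answer',
    schema: { type: 'object', properties: { text: { type: 'string' } } }
  })
} as PromptConfig)
// => { type: 'json_schema', json_schema: { name: 'answer', schema: { ... } } }
// An unparseable jsonSchema string throws 'Invalid JSON schema: ...' instead.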

/**
 * Build complete InferenceRequest from prompt config and inputs
 */
export function buildInferenceRequest(
  promptConfig: PromptConfig | undefined,
  systemPrompt: string | undefined,
  prompt: string | undefined,
  modelName: string,
  maxTokens: number,
  endpoint: string,
  token: string
): InferenceRequest {
  const messages = buildMessages(promptConfig, systemPrompt, prompt)
  const responseFormat = buildResponseFormat(promptConfig)

  return {
    messages,
    modelName,
    maxTokens,
    endpoint,
    token,
    responseFormat
  }
}
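
A sketch of the legacy path end to end, with placeholder environment variable names (ENDPOINT and TOKEN are illustrative, not inputs defined by this commit):

const request = buildInferenceRequest(
  undefined, // no prompt config, so buildMessages takes the legacy branch
  'You are a release-notes bot',
  'Summarize the latest commits',
  'gpt-4o', // illustrative model name
  200,
  process.env.ENDPOINT ?? '',
  process.env.TOKEN ?? ''
)
// request.responseFormat is undefined here, since no prompt config was given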

src/index.ts

Lines changed: 8 additions & 0 deletions
/**
 * The entrypoint for the action. This file simply imports and runs the action's
 * main logic.
 */
import { run } from './main.js'

/* istanbul ignore next */
run()

src/inference.ts

Lines changed: 172 additions & 0 deletions
import * as core from '@actions/core'
import ModelClient, { isUnexpected } from '@azure-rest/ai-inference'
import { AzureKeyCredential } from '@azure/core-auth'
import { GitHubMCPClient, executeToolCalls, MCPTool, ToolCall } from './mcp.js'
import { handleUnexpectedResponse } from './helpers.js'

interface ChatMessage {
  role: string
  content: string | null
  tool_calls?: ToolCall[]
}

interface ChatCompletionsRequestBody {
  messages: ChatMessage[]
  max_tokens: number
  model: string
  response_format?: { type: 'json_schema'; json_schema: unknown }
  tools?: MCPTool[]
}

export interface InferenceRequest {
  messages: Array<{ role: string; content: string }>
  modelName: string
  maxTokens: number
  endpoint: string
  token: string
  responseFormat?: { type: 'json_schema'; json_schema: unknown } // Processed response format for the API
}

export interface InferenceResponse {
  content: string | null
  toolCalls?: Array<{
    id: string
    type: string
    function: {
      name: string
      arguments: string
    }
  }>
}
/**
 * Simple one-shot inference without tools
 */
export async function simpleInference(
  request: InferenceRequest
): Promise<string | null> {
  core.info('Running simple inference without tools')

  const client = ModelClient(
    request.endpoint,
    new AzureKeyCredential(request.token),
    {
      userAgentOptions: { userAgentPrefix: 'github-actions-ai-inference' }
    }
  )

  const requestBody: ChatCompletionsRequestBody = {
    messages: request.messages,
    max_tokens: request.maxTokens,
    model: request.modelName
  }

  // Add response format if specified
  if (request.responseFormat) {
    requestBody.response_format = request.responseFormat
  }

  const response = await client.path('/chat/completions').post({
    body: requestBody
  })

  if (isUnexpected(response)) {
    handleUnexpectedResponse(response)
  }

  const modelResponse = response.body.choices[0].message.content
  core.info(`Model response: ${modelResponse || 'No response content'}`)

  return modelResponse
}
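
A minimal call, assuming an InferenceRequest assembled with buildInferenceRequest from helpers.ts; 'response' as an output name is illustrative:

const content = await simpleInference(request)
if (content === null) {
  core.warning('Model returned no content')
} else {
  core.setOutput('response', content)
}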

/**
 * GitHub MCP-enabled inference with tool execution loop
 */
export async function mcpInference(
  request: InferenceRequest,
  githubMcpClient: GitHubMCPClient
): Promise<string | null> {
  core.info('Running GitHub MCP inference with tools')

  const client = ModelClient(
    request.endpoint,
    new AzureKeyCredential(request.token),
    {
      userAgentOptions: { userAgentPrefix: 'github-actions-ai-inference' }
    }
  )

  // Start with the pre-processed messages
  const messages: ChatMessage[] = [...request.messages]

  let iterationCount = 0
  const maxIterations = 5 // Prevent infinite loops

  while (iterationCount < maxIterations) {
    iterationCount++
    core.info(`MCP inference iteration ${iterationCount}`)

    const requestBody: ChatCompletionsRequestBody = {
      messages: messages,
      max_tokens: request.maxTokens,
      model: request.modelName,
      tools: githubMcpClient.tools
    }

    // Add response format if specified (only on the first iteration to avoid conflicts)
    if (iterationCount === 1 && request.responseFormat) {
      requestBody.response_format = request.responseFormat
    }

    const response = await client.path('/chat/completions').post({
      body: requestBody
    })

    if (isUnexpected(response)) {
      handleUnexpectedResponse(response)
    }

    const assistantMessage = response.body.choices[0].message
    const modelResponse = assistantMessage.content
    const toolCalls = assistantMessage.tool_calls

    core.info(`Model response: ${modelResponse || 'No response content'}`)

    messages.push({
      role: 'assistant',
      content: modelResponse || '',
      ...(toolCalls && { tool_calls: toolCalls })
    })

    if (!toolCalls || toolCalls.length === 0) {
      core.info('No tool calls requested, ending GitHub MCP inference loop')
      return modelResponse
    }

    core.info(`Model requested ${toolCalls.length} tool calls`)

    // Execute all tool calls via GitHub MCP
    const toolResults = await executeToolCalls(
      githubMcpClient.client,
      toolCalls
    )

    // Add tool results to the conversation
    messages.push(...toolResults)

    core.info('Tool results added, continuing conversation...')
  }

  core.warning(
    `GitHub MCP inference loop exceeded maximum iterations (${maxIterations})`
  )

  // Return the last assistant message content
  const lastAssistantMessage = messages
    .slice()
    .reverse()
    .find((msg) => msg.role === 'assistant')

  return lastAssistantMessage?.content || null
}
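
Driving the tool loop requires a connected GitHubMCPClient from ./mcp.js, which is not part of this diff; the factory name below is assumed for illustration only:

// createGitHubMCPClient is hypothetical: ./mcp.js is not shown in this commit
const mcpClient = await createGitHubMCPClient(process.env.GITHUB_TOKEN ?? '')
const answer = await mcpInference(request, mcpClient)
core.info(answer ?? 'No final answer produced')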
