forked from nisalgunawardhana/Github-models-starter-pro
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsample-tools.js
More file actions
159 lines (143 loc) · 5.73 KB
/
sample-tools.js
File metadata and controls
159 lines (143 loc) · 5.73 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
/**
* FUNCTION CALLING / TOOL USAGE DEMONSTRATION
*
* This file demonstrates the advanced function calling capabilities of GPT-4o, allowing the AI
* to interact with external functions and tools to provide more accurate and dynamic responses.
* The application:
* 1. Defines a mock flight information function that returns flight details
* 2. Registers this function as a "tool" that the AI can call
* 3. Sends a user query that requires flight information
* 4. Handles the AI's request to call the function
* 5. Provides the function result back to the AI for final response formatting
*
* Key concepts demonstrated:
* - Function/tool definition with JSON schema
* - Tool registration and parameter specification
* - Multi-step conversation flow with function calls
* - Dynamic function execution based on AI requests
* - Result integration and response formatting
*
* This showcases how AI models can be extended with external capabilities,
* making them more practical for real-world applications requiring live data
* or specific computations.
*/
import OpenAI from "openai";
import dotenv from "dotenv";

// Pull configuration (GITHUB_TOKEN, etc.) from a local .env file.
dotenv.config();

// Credentials and model configuration for the GitHub Models inference endpoint.
const token = process.env.GITHUB_TOKEN;
const endpoint = "https://models.github.ai/inference";
const modelName = "openai/gpt-4o";
/**
 * Mock flight lookup used to demonstrate tool calling.
 * A production version would query a real flight-data API instead of
 * returning canned data.
 *
 * @param {Object} params - Flight search parameters
 * @param {string} params.originCity - Departure city
 * @param {string} params.destinationCity - Arrival city
 * @returns {string} JSON string with flight details, or a JSON error object
 */
function getFlightInfo({ originCity, destinationCity }) {
  // Only one canned route exists in this demo: Seattle -> Miami.
  const isKnownRoute = originCity === "Seattle" && destinationCity === "Miami";
  if (!isKnownRoute) {
    return JSON.stringify({ error: "No flights found between the cities" });
  }
  return JSON.stringify({
    airline: "Delta",
    flight_number: "DL123",
    flight_date: "July 16th, 2025",
    flight_time: "10:00AM",
  });
}
/**
 * Registry of callable tool implementations, keyed by function name.
 * Lets the tool-call handler dispatch whatever function the model
 * requests at runtime without a hard-coded switch.
 */
const namesToFunctions = {
  getFlightInfo: (args) => getFlightInfo(args),
};
/**
 * Runs the tool-calling demo end to end:
 * 1. registers the getFlightInfo tool schema,
 * 2. sends a user query that needs flight data,
 * 3. executes the function the model asks for,
 * 4. feeds the result back for the model's final answer.
 */
export async function main() {
  // Define the tool/function schema that the AI can use.
  const tool = {
    type: "function",
    function: {
      name: "getFlightInfo",
      // Description helps the AI understand when and how to use this function.
      // Note the trailing spaces on each fragment: without them the model
      // would see run-together words like "cities.This" and "timeof".
      description:
        "Returns information about the next flight between two cities. " +
        "This includes the name of the airline, flight number and the date and time " +
        "of the next flight",
      // JSON schema defining the required parameters.
      parameters: {
        type: "object",
        properties: {
          originCity: {
            type: "string",
            description: "The name of the city where the flight originates",
          },
          destinationCity: {
            type: "string",
            description: "The flight destination city",
          },
        },
        required: ["originCity", "destinationCity"],
      },
    },
  };

  // Initialize OpenAI client pointed at the GitHub Models endpoint.
  const client = new OpenAI({ baseURL: endpoint, apiKey: token });

  // Initialize conversation with system message and user query.
  // (const: the array is appended to, never reassigned.)
  const messages = [
    { role: "system", content: "You are an assistant that helps users find flight information." },
    { role: "user", content: "I'm interested in going to Miami. What is the next flight there from Seattle?" },
  ];

  // Send initial request with the flight-info tool made available to the AI.
  let response = await client.chat.completions.create({
    messages,
    tools: [tool],
    model: modelName,
  });

  // Check if the AI wants to call our function.
  if (response.choices[0].finish_reason === "tool_calls") {
    const assistantMessage = response.choices[0].message;
    // Add the AI's response (including the tool-call request) to the history.
    messages.push(assistantMessage);

    // Process the tool call (this sample expects exactly one function call;
    // optional chaining guards against a missing tool_calls array).
    if (assistantMessage && assistantMessage.tool_calls?.length === 1) {
      const toolCall = assistantMessage.tool_calls[0];
      // Verify it's a function call as expected.
      if (toolCall.type === "function") {
        // Extract function arguments and execute the matching local function.
        const functionArgs = JSON.parse(toolCall.function.arguments);
        console.log(`Calling function \`${toolCall.function.name}\` with arguments ${toolCall.function.arguments}`);
        const callableFunc = namesToFunctions[toolCall.function.name];
        const functionReturn = callableFunc(functionArgs);
        console.log(`Function returned = ${functionReturn}`);

        // Add the function result to the conversation so the model can use it.
        messages.push({
          tool_call_id: toolCall.id,
          role: "tool",
          name: toolCall.function.name,
          content: functionReturn,
        });

        // Send the complete conversation (with function result) back to the
        // model so it can format the final user-facing answer.
        response = await client.chat.completions.create({
          messages,
          tools: [tool],
          model: modelName,
        });

        // Display the AI's final response that incorporates the function result.
        console.log(`Model response = ${response.choices[0].message.content}`);
      }
    }
  }
}
// Execute main function with error handling
main().catch((err) => {
console.error("The sample encountered an error:", err);
});