Skip to content

Commit e2e16dc

Browse files
kevin-lann and github-actions[bot]
authored and committed
Auto-formatted the code using Prettier
1 parent e28d219 commit e2e16dc

File tree

3 files changed

+66
-56
lines changed

3 files changed

+66
-56
lines changed

course-matrix/backend/src/constants/availableFunctions.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,8 @@ export const availableFunctions: AvailableFunctions = {
3636

3737
return { status: 200, data: timetableData };
3838
} catch (error) {
39-
console.log(error)
39+
console.log(error);
4040
return { status: 400, error: error };
4141
}
4242
},
43-
};
43+
};

course-matrix/backend/src/controllers/aiController.ts

Lines changed: 63 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,15 @@
11
import asyncHandler from "../middleware/asyncHandler";
22
import { Request, Response } from "express";
33
import { createOpenAI } from "@ai-sdk/openai";
4-
import { CoreMessage, generateObject, InvalidToolArgumentsError, NoSuchToolError, streamText, tool, ToolExecutionError } from "ai";
4+
import {
5+
CoreMessage,
6+
generateObject,
7+
InvalidToolArgumentsError,
8+
NoSuchToolError,
9+
streamText,
10+
tool,
11+
ToolExecutionError,
12+
} from "ai";
513
import { Index, Pinecone, RecordMetadata } from "@pinecone-database/pinecone";
614
import { PineconeStore } from "@langchain/pinecone";
715
import { OpenAIEmbeddings } from "@langchain/openai";
@@ -232,7 +240,7 @@ async function reformulateQuery(
232240
content: latestQuery,
233241
});
234242

235-
console.log(messages)
243+
console.log(messages);
236244

237245
const response = await openai2.chat.completions.create({
238246
model: "gpt-4o-mini",
@@ -322,10 +330,10 @@ export const chat = asyncHandler(async (req: Request, res: Response) => {
322330
if (latestMessage.startsWith(CHATBOT_TIMETABLE_CMD)) {
323331
// ----- Flow 1 - Agent performs action on timetable -----
324332

325-
// Get a new response from the model with all the tool responses
326-
const result = streamText({
327-
model: openai("gpt-4o-mini"),
328-
system: `# Morpheus - Course Matrix Assistant
333+
// Get a new response from the model with all the tool responses
334+
const result = streamText({
335+
model: openai("gpt-4o-mini"),
336+
system: `# Morpheus - Course Matrix Assistant
329337
330338
## Identity & Purpose
331339
You are Morpheus, the official AI assistant for Course Matrix, an AI-powered platform that helps University of Toronto Scarborough (UTSC) students plan their academic journey.
@@ -350,54 +358,56 @@ export const chat = asyncHandler(async (req: Request, res: Response) => {
350358
- If information is missing from the context but likely exists, try to use info from web to answer. If still not able to form a decent response, acknowledge the limitation
351359
- For unrelated questions, politely explain that you're specialized in UTSC academic information
352360
`,
353-
messages,
354-
tools: {
355-
getTimetables: tool({
356-
description: "Get all the timetables of the currently logged in user.",
357-
parameters: z.object({}),
358-
execute: async (args) => {
359-
return await availableFunctions.getTimetables(args, req);
360-
}
361-
})
362-
},
363-
maxSteps: 3, // Controls how many back and forths the model can take with user or calling multiple tools
364-
experimental_repairToolCall: async ({
365-
toolCall,
366-
tools,
367-
parameterSchema,
368-
error,
369-
}) => {
370-
if (NoSuchToolError.isInstance(error)) {
371-
return null; // do not attempt to fix invalid tool names
372-
}
373-
374-
const tool = tools[toolCall.toolName as keyof typeof tools];
375-
console.log(`The model tried to call the tool "${toolCall.toolName}"` +
376-
` with the following arguments:`,
377-
JSON.stringify(toolCall.args),
378-
`The tool accepts the following schema:`,
379-
JSON.stringify(parameterSchema(toolCall)),
380-
'Please fix the arguments.')
381-
382-
const { object: repairedArgs } = await generateObject({
383-
model: openai('gpt-4o', { structuredOutputs: true }),
384-
schema: tool.parameters,
385-
prompt: [
386-
`The model tried to call the tool "${toolCall.toolName}"` +
387-
` with the following arguments:`,
388-
JSON.stringify(toolCall.args),
389-
`The tool accepts the following schema:`,
390-
JSON.stringify(parameterSchema(toolCall)),
391-
'Please fix the arguments.',
392-
].join('\n'),
393-
});
394-
395-
return { ...toolCall, args: JSON.stringify(repairedArgs) };
396-
},
397-
});
361+
messages,
362+
tools: {
363+
getTimetables: tool({
364+
description:
365+
"Get all the timetables of the currently logged in user.",
366+
parameters: z.object({}),
367+
execute: async (args) => {
368+
return await availableFunctions.getTimetables(args, req);
369+
},
370+
}),
371+
},
372+
maxSteps: 3, // Controls how many back and forths the model can take with user or calling multiple tools
373+
experimental_repairToolCall: async ({
374+
toolCall,
375+
tools,
376+
parameterSchema,
377+
error,
378+
}) => {
379+
if (NoSuchToolError.isInstance(error)) {
380+
return null; // do not attempt to fix invalid tool names
381+
}
398382

399-
result.pipeDataStreamToResponse(res);
400-
383+
const tool = tools[toolCall.toolName as keyof typeof tools];
384+
console.log(
385+
`The model tried to call the tool "${toolCall.toolName}"` +
386+
` with the following arguments:`,
387+
JSON.stringify(toolCall.args),
388+
`The tool accepts the following schema:`,
389+
JSON.stringify(parameterSchema(toolCall)),
390+
"Please fix the arguments.",
391+
);
392+
393+
const { object: repairedArgs } = await generateObject({
394+
model: openai("gpt-4o", { structuredOutputs: true }),
395+
schema: tool.parameters,
396+
prompt: [
397+
`The model tried to call the tool "${toolCall.toolName}"` +
398+
` with the following arguments:`,
399+
JSON.stringify(toolCall.args),
400+
`The tool accepts the following schema:`,
401+
JSON.stringify(parameterSchema(toolCall)),
402+
"Please fix the arguments.",
403+
].join("\n"),
404+
});
405+
406+
return { ...toolCall, args: JSON.stringify(repairedArgs) };
407+
},
408+
});
409+
410+
result.pipeDataStreamToResponse(res);
401411
} else {
402412
// ----- Flow 2 - Answer query -----
403413

course-matrix/frontend/src/pages/Assistant/runtime-provider.tsx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ export function RuntimeProvider({
6161
const runtime = useChatRuntime({
6262
cloud,
6363
api: `${SERVER_URL}/api/ai/chat`,
64-
credentials: "include"
64+
credentials: "include",
6565
});
6666

6767
const contextValue = {

0 commit comments

Comments (0)