import OpenAI from "openai";
import { GPTModel } from "./enums";
import path from "path";
import fs from "fs";
import {
  AssistantBody,
  GPTResponse,
  ThreadMessage,
  GPTData,
  Testing,
  TestLocation,
} from "./types";
import { prompt, questions } from "./prompts.data";

export class GPTController {
  private static client: OpenAI;
  private model: GPTModel;

  constructor(model: GPTModel) {
    if (!GPTController.client) {
      GPTController.client = new OpenAI({
        apiKey: process.env.OPEN_API_KEY, // The SDK only reads OPENAI_API_KEY by default, so the key is passed explicitly here
      });
    }
    this.model = model;
  }

  async runGPTAnalysis(filePaths: string[]): Promise<GPTResponse[]> {
    const assistantParams: AssistantBody = {
      name: "Radiation Effects Researcher",
      instructions:
        "You are a radiation effects researcher. Use your knowledge to give very concise and numerical answers to the questions. Please do not give citations.",
      model: this.model,
      tools: [{ type: "file_search" }],
      temperature: 0.1,
    };

    // TODO: Consider extracting the per-file processing below into its own function
    const results: GPTResponse[] = [];

    // Upload files and create threads concurrently
    const fileThreads = filePaths.map(async (filePath: string) => {
      // Each pass gets its own assistant and thread so the papers stay separated
      const fileID = await this.uploadFile(filePath);
      const threadMessage: ThreadMessage = {
        role: "assistant",
        content: prompt + questions,
        attachments: [{ file_id: fileID, tools: [{ type: "file_search" }] }],
      };
      //console.log(`Thread Message: ${threadMessage}`)
      // Create three threads (passes) for each paper
      const threadResults: GPTData[] = [];
      //const loopPromises = Array.from({ length: 1 }, async (_) => { // FOR TESTING
      const loopPromises = Array.from({ length: 3 }, async (_) => {
        const assistant = await this.createAssistant(assistantParams);
        const thread = await this.createThread(threadMessage);

        // Run the assistant on the thread and get the prompt results
        const run = await GPTController.client.beta.threads.runs.createAndPoll(
          thread.id,
          {
            assistant_id: assistant.id,
          },
        );
        if (run.status === "completed") {
          const messages =
            await GPTController.client.beta.threads.messages.list(
              run.thread_id,
            );
          // console.log("Tokens used: ", run.usage)
          let n = 1;
          for (const message of messages.data.reverse()) {
            if (message.content[0].type === "text") {
              // Only parse message content that is text
              const result = message.content[0].text.value;
              //console.log("Result: ", result) // FOR DEBUGGING
              if (n % 2 === 0) {
                // Every second message carries the data values
                // console.log(`${message.role} > ${result}`); // FOR DEBUGGING
                // The answer is expected as a single "ø"-delimited string:
                // paper name ø year ø authors (¶-separated) ø part no ø type ø manufacturer ø testing location ø testing type
                const preres = result
                  .split("ø")
                  .map((s) => s.trim()); // Trim whitespace and newlines from each field
                console.log("After split: ", preres);
                const resvalues: GPTData = {
                  paper_name: preres[0],
                  year: parseInt(preres[1], 10),
                  author: preres[2]
                    .split("¶")
                    .map((s) => s.trim()),
                  part_no: preres[3],
                  type: preres[4],
                  manufacturer: preres[5],
                  testing_location: preres[6] as TestLocation,
                  testing_type: preres[7] as Testing,
                  // TODO: preres[7] can be a list ("TID, TID, DD") when a paper has more than one testing type, so the cast may fail
                  // The model also sometimes appends a citation marker, e.g. "SEE【4:0†source】"
                  data_type: 0, // TODO: Remove this here, from the defined data types, and from the DB controller
                };
                console.log(resvalues);
                threadResults.push(resvalues);
              }
              n++;
            }
          }
        } else {
          console.error(`Run finished with status: ${run.status}`);
        }
      });

      // Wait for all loop iterations to finish
      await Promise.all(loopPromises);

      const threadFinal: GPTResponse = {
        pass_1: threadResults[0],
        pass_2: threadResults[1],
        pass_3: threadResults[2],
      };
      //console.log(threadFinal)
      results.push(threadFinal);
    });

    await Promise.all(fileThreads);
    console.log("All threads completed!");
    return results;
  }

  /*
   * Parameters:
   * - fp: the path of the file to upload
   * Function: Uploads the given file to the OpenAI client
   * Returns:
   * - string: The ID of the file uploaded to the OpenAI API
   */
  private async uploadFile(fp: string): Promise<string> {
    const filePath = path.resolve(fp);
    const fileStream = fs.createReadStream(filePath);

    const response = await GPTController.client.files.create({
      file: fileStream,
      purpose: "assistants",
    });
    console.log("uploadFile: ", response);
    return response.id; // Return the uploaded file ID
  }

  /*
   * Parameters:
   * - assistantDetails: an instance of AssistantBody containing the required info to create an assistant
   * Function: Creates a new assistant
   * Returns:
   * - OpenAI.Beta.Assistants.Assistant: The new assistant instance
   */
  private async createAssistant(
    assistantDetails: AssistantBody,
  ): Promise<OpenAI.Beta.Assistants.Assistant> {
    const assistant = await GPTController.client.beta.assistants.create(
      assistantDetails,
    );
    return assistant;
  }

  /*
   * Parameters:
   * - threadMessage: an instance of ThreadMessage containing the required info to create a new message
   * Function: Creates a new thread with an accompanying message
   * Returns:
   * - OpenAI.Beta.Thread: The new thread
   */
  private async createThread(
    threadMessage: ThreadMessage,
  ): Promise<OpenAI.Beta.Thread> {
    const thread = await GPTController.client.beta.threads.create({
      messages: [threadMessage],
    });
    return thread;
  }
}
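
/*
 * Usage sketch (illustrative only, not part of this file): assumes OPEN_API_KEY
 * is set in the environment, that GPTModel exposes a member such as GPT4o, that
 * this file is saved as gpt.controller.ts, and that the listed PDF paths exist.
 *
 * import { GPTController } from "./gpt.controller";
 * import { GPTModel } from "./enums";
 *
 * async function main() {
 *   const controller = new GPTController(GPTModel.GPT4o);
 *   const responses = await controller.runGPTAnalysis([
 *     "./papers/paper1.pdf",
 *     "./papers/paper2.pdf",
 *   ]);
 *   // Each GPTResponse holds three independent passes (pass_1..pass_3) over one paper
 *   console.log(JSON.stringify(responses, null, 2));
 * }
 *
 * main().catch(console.error);
 */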