1010//-----------------------------------------------------------------------------
1111
1212import fsp from "node:fs/promises" ;
13+ import { fileURLToPath } from "node:url" ;
14+ import { dirname , join } from "node:path" ;
1315
1416//-----------------------------------------------------------------------------
1517// Type Definitions
@@ -28,30 +30,27 @@ import fsp from "node:fs/promises";
2830 * @returns {Promise<string> } The prompt text.
2931 * @throws {Error } If the file cannot be read.
3032 */
export async function readPrompt() {
    // Resolve the prompt file relative to this module rather than the
    // process working directory, so the file is found no matter where
    // the consumer launches Node from.
    const moduleDir = dirname(fileURLToPath(import.meta.url));
    const promptPath = join(moduleDir, "prompt.txt");
    return fsp.readFile(promptPath, "utf8");
}
3437
// Roles accepted by the OpenAI chat completions API.
const gptRoles = new Set(["system", "user", "developer"]);

/**
 * Validates the role of a GPT message.
 * @param {string} role The role to validate.
 * @returns {void}
 * @throws {Error} If the role is invalid.
 */
function validateGptRole(role) {
    const isUsableString = typeof role === "string" && role.trim() !== "";

    if (!isUsableString) {
        throw new Error("Invalid role");
    }

    if (gptRoles.has(role)) {
        return;
    }

    throw new Error(`Invalid role: ${role}`);
}
5655
5756//-----------------------------------------------------------------------------
@@ -62,100 +61,98 @@ function validateGptRole(role) {
6261 * Generates a social media post using OpenAI.
6362 */
export class PostGenerator {
    /**
     * The OpenAI API token.
     * @type {string}
     */
    #token;

    /**
     * The AI prompt.
     * @type {string}
     */
    #prompt;

    /**
     * Creates a new PostGenerator instance.
     * @param {string|undefined} token The OpenAI API token.
     * @param {Object} [options] The options for the generator.
     * @param {string} [options.prompt] The AI prompt.
     * @throws {Error} If the token is missing or not a string.
     */
    constructor(token, { prompt = "" } = {}) {
        if (!token) {
            throw new Error("Missing OpenAI API token");
        }

        if (typeof token !== "string") {
            throw new Error("OpenAI API token isn't a string");
        }

        this.#token = token;
        this.#prompt = prompt;
    }

    /**
     * Fetches a completion from OpenAI.
     * @param {Object} options The options for the completion.
     * @param {string} options.model The model to use.
     * @param {Array<GptMessage>} options.messages The messages to send.
     * @returns {Promise<GptChatCompletionResponse>} The completion response.
     * @throws {Error} If any message has an invalid role or the response
     *      is not ok.
     */
    async #fetchCompletion({ model, messages }) {
        // Fail fast on malformed messages before spending an API call.
        messages.forEach(message => {
            validateGptRole(message.role);
        });

        const response = await fetch(
            "https://api.openai.com/v1/chat/completions",
            {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    Authorization: `Bearer ${this.#token}`,
                },
                body: JSON.stringify({
                    model,
                    messages,
                    temperature: 0.7,
                }),
            },
        );

        if (!response.ok) {
            throw new Error(
                `${response.status} ${response.statusText}: Chat completion failed`,
            );
        }

        return await response.json();
    }

    /**
     * Generates a tweet summary using OpenAI.
     * @param {string} projectName The name of the project.
     * @param {ReleaseInfo} release The release information.
     * @returns {Promise<string|undefined>} The generated tweet, or
     *      `undefined` when the response carries no message content.
     */
    async generateSocialPost(projectName, release) {
        // An explicitly configured prompt wins; otherwise fall back to
        // the prompt.txt file shipped alongside this module.
        const systemPrompt = this.#prompt || (await readPrompt());

        const { details, url, version } = release;

        const completion = await this.#fetchCompletion({
            model: "gpt-4o-mini",
            messages: [
                { role: "system", content: systemPrompt },
                {
                    role: "user",
                    content: `Create a post summarizing this release for ${projectName} ${version}: ${details}\n\nURL is ${url}`,
                },
            ],
        });

        // Optional-chain `choices` as well as the message: an unexpected
        // response shape (e.g. an error payload without a `choices`
        // array) should yield undefined, not throw a raw TypeError.
        return completion.choices?.[0]?.message?.content;
    }
}
0 commit comments