diff --git a/day-planner-agent/readme.md b/day-planner-agent/readme.md index 6cff23d..124a65b 100644 --- a/day-planner-agent/readme.md +++ b/day-planner-agent/readme.md @@ -2,9 +2,6 @@ Day Planner Agent is an AI-driven tool developed in Node.js, designed to assist in creating a professional daily schedule based on user tasks. It leverages the following core functionalities: -- Task Prioritization -- Schedule Optimization - ## Features - 📅 **Day Planning** diff --git a/email-agent/readme.md b/email-agent/readme.md index b95e8cb..1bd3b67 100644 --- a/email-agent/readme.md +++ b/email-agent/readme.md @@ -2,11 +2,6 @@ Email Agent is an agent in Node.js built with Langbase, designed to assist in analyzing and managing email communications. It leverages the following core functionalities: -- Sentiment Analysis -- Content Summarization -- Response Decision Making -- Email Reply Generation - ## Features - 📧 **Email Analysis** diff --git a/feedback-agent/readme.md b/feedback-agent/readme.md index 84e8606..2c6a8d1 100644 --- a/feedback-agent/readme.md +++ b/feedback-agent/readme.md @@ -2,11 +2,6 @@ Feedback Agent is an agent in Node.js built with Langbase, designed to assist in collecting and analyzing user feedback. It leverages the following core functionalities: -- Sentiment Analysis -- Content Summarization -- Response Decision Making -- Escalation Message Creation - ## Features - 📝 **Feedback Analysis** diff --git a/history-tutor-agent/readme.md b/history-tutor-agent/readme.md index 8655f0c..bff5489 100644 --- a/history-tutor-agent/readme.md +++ b/history-tutor-agent/readme.md @@ -2,10 +2,6 @@ History Tutor Agent is an agent in Node.js built with Langbase, designed to assist in learning history by breaking down topics into smaller, distinct subtasks. It leverages the following core functionalities: -- Topic Analysis -- Subtask Generation -- Historical Synthesis - ## Features - 📚 **Historical Analysis** diff --git a/journalist-agent/.env.example b/journalist-agent/.env.example new file mode 100644 index 0000000..48c8f10 --- /dev/null +++ b/journalist-agent/.env.example @@ -0,0 +1,3 @@ +LANGBASE_API_KEY= your-langbase-api-key +OPENAI_API_KEY= your-openai-api-key +EXA_API_KEY= your-exa-api-key \ No newline at end of file diff --git a/journalist-agent/index.ts b/journalist-agent/index.ts new file mode 100644 index 0000000..6632272 --- /dev/null +++ b/journalist-agent/index.ts @@ -0,0 +1,134 @@ +import { Langbase, Workflow } from 'langbase'; +import dotenv from 'dotenv'; + +dotenv.config(); + +async function journalistWorkflow(input: string) { + const langbase = new Langbase({ + apiKey: process.env.LANGBASE_API_KEY! + }); + + const { step } = new Workflow({ + debug: true + }); + + // Step 1: Research the topic by searching the web + const searchResults = await step({ + id: 'research_topic', + run: async () => { + return await langbase.tools.webSearch({ + service: 'exa', + query: input, + totalResults: 5, + apiKey: process.env.EXA_API_KEY! 
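+				// Each search result's `url` and `content` fields are interpolated into
+				// the outline and drafting prompts in the later steps below.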
+ }); + } + }); + + // Step 2: Retrieve journalism guidelines from memory + const journalismGuidelines = await step({ + id: 'retrieve_guidelines', + run: async () => { + return await langbase.memories.retrieve({ + query: 'journalism style guidelines and best practices', + memory: [{ name: 'journalism-guidelines-1747869545619' }] + }); + } + }); + + // Step 3: Generate article outline + const articleOutline = await step({ + id: 'generate_outline', + run: async () => { + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: `You are a professional journalist creating an article outline. + Based on the research provided, create a detailed outline for an article on the topic. + Include a compelling headline, introduction, 3-5 main sections, and conclusion.`, + input: [ + { + role: 'user', + content: `Topic: ${input}\n\nResearch:\n${searchResults + .map( + result => + `Source: ${result.url}\n${result.content}\n` + ) + .join('\n')}` + } + ], + stream: false + }); + return output; + } + }); + + // Step 4: Write the full article + const draftArticle = await step({ + id: 'write_article', + run: async () => { + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: `You are a professional journalist writing a high-quality article. + Follow the outline provided and incorporate information from the research. + Write in a clear, engaging, and informative style. + Include proper citations and quotes from sources where appropriate. + The article should be comprehensive, accurate, and well-structured.`, + input: [ + { + role: 'user', + content: `Write a complete article based on this outline:\n${articleOutline}\n\nResearch sources:\n${searchResults + .map( + result => + `Source: ${result.url}\n${result.content}\n` + ) + .join('\n')}` + } + ], + stream: false + }); + return output; + } + }); + + // Step 5: Edit and refine the article + const finalArticle = await step({ + id: 'edit_article', + run: async () => { + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: `You are a professional editor for the New York Times. + Review and refine the article to ensure it meets the highest journalistic standards. + Apply the journalism guidelines provided. + Check for accuracy, clarity, coherence, and proper citation of sources. + Improve the language, flow, and structure where needed. + Ensure the article is balanced, objective, and free of bias. 
+ The final article should be publication-ready.`, + input: [ + { + role: 'user', + content: `Edit and refine this article to meet New York Times standards:\n\n${draftArticle}\n\nJournalism Guidelines:\n${journalismGuidelines.map(item => item.text).join('\n')}` + } + ], + stream: false + }); + return output; + } + }); + + return { + article: finalArticle, + sources: searchResults.map(result => result.url) + }; +} + +async function main() { + const topic = 'The latest news on the stock market'; + console.log('📰 Topic:', topic); + const result = await journalistWorkflow(topic); + return result; +} + +main(); diff --git a/journalist-agent/package.json b/journalist-agent/package.json new file mode 100644 index 0000000..57b45f6 --- /dev/null +++ b/journalist-agent/package.json @@ -0,0 +1,17 @@ +{ + "name": "journalist-agent", + "version": "1.0.0", + "main": "index.js", + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "dependencies": { + "dotenv": "^16.4.7", + "langbase": "^1.1.55" + }, + "devDependencies": { + "@types/node": "^22.15.2", + "tsx": "^4.19.1" + } +} \ No newline at end of file diff --git a/journalist-agent/readme.md b/journalist-agent/readme.md new file mode 100644 index 0000000..bb34652 --- /dev/null +++ b/journalist-agent/readme.md @@ -0,0 +1,68 @@ +## Journalist Agent + +Journalist Agent is a Node.js tool using [CHAI](https://chai.new) OpenAI and Exa to create high-quality articles. It automates research, writing, and editing, ensuring content meets New York Times standards. + +## Features + +- 📰 **Web Research**: Searches the web for relevant information with Exa. +- 📝 **Writing**: Writes an engaging article. +- 📝 **Editing**: Refines the article to New York Times standards. + +## Get started + +Let's get started with the project: + +1. To get started with Langbase, you'll need to [create a free personal account on Langbase.com][signup] and verify your email address. Done? Cool, cool! + +2. Download and setup the project + + The following command: + + - Downloads the example project folder from [here](https://download-directory.github.io/?url=https://github.com/LangbaseInc/awesome-agents/tree/main/journalist-agent) + - Renames the folder to journalist-agent + - Changes the directory to the project folder + - Copies the .env.example file to .env in the project folder + + ```bash + npx degit LangbaseInc/awesome-agents/journalist-agent journalist-agent && + cd journalist-agent && + cp .env.example .env + ``` + +3. Add the values of these environment variables to the .env file: + + ```plaintext + # Get your org or user API key that can be used to access everything with Langbase. + # https://langbase.com/docs/api-reference/api-keys + LANGBASE_API_KEY="your-langbase-api-key" + OPENAI_API_KEY="your-openai-api-key" + ``` + +4. Install dependencies: + + ```bash + pnpm install + + # OR + npm install + ``` + +5. 
Run the journalist-agent: + + ```bash + pnpm dlx tsx index.ts + + # OR + npx tsx index.ts + ``` + +## Project Structure + +``` +journalist-agent/ +├── .env.example # Environment variables example +├── .gitignore # Git ignore +├── index.ts # Journalist agent implementation +├── package.json # Node.js package configuration and dependencies +└── readme.md # Project documentation +``` diff --git a/journalist-agent/tsconfig.json b/journalist-agent/tsconfig.json new file mode 100644 index 0000000..45f9a18 --- /dev/null +++ b/journalist-agent/tsconfig.json @@ -0,0 +1,12 @@ +{ + "compilerOptions": { + "target": "es2016", + "module": "commonjs", + "rootDir": "./", + "outDir": "./dist", + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "strict": true, + "skipLibCheck": true + } +} \ No newline at end of file diff --git a/maths-solver-agent/.env.example b/maths-solver-agent/.env.example new file mode 100644 index 0000000..efdf539 --- /dev/null +++ b/maths-solver-agent/.env.example @@ -0,0 +1,2 @@ +LANGBASE_API_KEY= your-langbase-api-key +OPENAI_API_KEY= your-openai-api-key \ No newline at end of file diff --git a/maths-solver-agent/index.ts b/maths-solver-agent/index.ts new file mode 100644 index 0000000..fb8e108 --- /dev/null +++ b/maths-solver-agent/index.ts @@ -0,0 +1,94 @@ +import { Langbase, Workflow } from 'langbase'; +import { z } from 'zod'; +import { zodToJsonSchema } from 'zod-to-json-schema'; +import dotenv from 'dotenv'; + +dotenv.config(); + +// Define schema for structured output +const mathSolutionSchema = z.object({ + problem: z.string().describe('The original math problem'), + steps: z.array( + z.object({ + step: z.number().describe('Step number'), + description: z.string().describe('Description of this step'), + calculation: z + .string() + .describe('The calculation or mathematical work for this step'), + explanation: z + .string() + .describe('Explanation of the reasoning behind this step') + }) + ), + finalAnswer: z.string().describe('The final answer to the problem'), + additionalNotes: z + .string() + .describe('Any additional notes or alternative approaches') +}); + +// Convert zod schema to JSON schema +const mathSolutionJsonSchema = zodToJsonSchema(mathSolutionSchema, { + target: 'openAi' +}); + +async function mathSolverWorkflow(input: string) { + const langbase = new Langbase({ + apiKey: process.env.LANGBASE_API_KEY! + }); + + const { step } = new Workflow({ + debug: true + }); + + // Step 2: Solve the math problem with structured output + const solution = await step({ + id: 'solve_problem', + run: async () => { + const response = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: `You are an expert math tutor. Solve the given math problem step by step, showing all your work clearly. + Break down complex problems into manageable steps. Explain your reasoning at each step. + For algebraic problems, show each transformation of the equation. + For calculus problems, explain the concepts and techniques you're using. + For geometry problems, include relevant theorems and properties. + For word problems, explain how you translate the problem into mathematical form. 
+ Always verify your answer by checking if it satisfies the original problem.`, + input: [{ role: 'user', content: input }], + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'MathSolution', + schema: mathSolutionJsonSchema, + strict: true + } + } + }); + + return JSON.parse(response.output); + } + }); + + return solution; +} + +async function main() { + try { + // Handle both direct input and file uploads + let question = + 'There are 49 dogs signed up for a dog show. There are 36 more small dogs than large dogs. How many small dogs have signed up to compete?'; + console.log('🔢 Question:', question); + const result = await mathSolverWorkflow(question); + return result; + } catch (error) { + console.error('Error in math solver workflow:', error); + return { + error: + error || 'An error occurred while processing the math problem', + status: 500 + }; + } +} + +main(); diff --git a/maths-solver-agent/package.json b/maths-solver-agent/package.json new file mode 100644 index 0000000..4d0454d --- /dev/null +++ b/maths-solver-agent/package.json @@ -0,0 +1,19 @@ +{ + "name": "maths-solver-agent", + "version": "1.0.0", + "main": "index.js", + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "dependencies": { + "dotenv": "^16.4.7", + "langbase": "^1.1.55", + "zod": "^3.25.20", + "zod-to-json-schema": "^3.24.5" + }, + "devDependencies": { + "@types/node": "^22.15.2", + "tsx": "^4.19.1" + } +} \ No newline at end of file diff --git a/maths-solver-agent/readme.md b/maths-solver-agent/readme.md new file mode 100644 index 0000000..2d4d96d --- /dev/null +++ b/maths-solver-agent/readme.md @@ -0,0 +1,67 @@ +## Maths Solver Agent + +Maths Solver Agent is an agent in Node.js built using [CHAI](https://chai.new), designed to assist in solving maths problems by breaking down topics into smaller, distinct subtasks. It leverages the following core functionalities: + +## Features + + - **Problem Analysis**: Analyzes the problem and breaks it down into smaller steps. + - **Step-by-Step Solutions**: Provides step-by-step solutions with clear explanations in JSON format. + +## Get started + +Let's get started with the project: + +1. To get started with Langbase, you'll need to [create a free personal account on Langbase.com][signup] and verify your email address. Done? Cool, cool! + +2. Download and setup the project + + The following command: + + - Downloads the example project folder from [here](https://download-directory.github.io/?url=https://github.com/LangbaseInc/awesome-agents/tree/main/maths-solver-agent) + - Renames the folder to maths-solver-agent + - Changes the directory to the project folder + - Copies the .env.example file to .env in the project folder + + ```bash + npx degit LangbaseInc/awesome-agents/maths-solver-agent maths-solver-agent && + cd maths-solver-agent && + cp .env.example .env + ``` + +3. Add the values of these environment variables to the .env file: + + ```plaintext + # Get your org or user API key that can be used to access everything with Langbase. + # https://langbase.com/docs/api-reference/api-keys + LANGBASE_API_KEY="your-langbase-api-key" + OPENAI_API_KEY="your-openai-api-key" + ``` + +4. Install dependencies: + + ```bash + pnpm install + + # OR + npm install + ``` + +5. 
Run the maths-solver-agent: + + ```bash + pnpm dlx tsx index.ts + + # OR + npx tsx index.ts + ``` + +## Project Structure + +``` +maths-solver-agent/ +├── .env.example # Environment variables example +├── .gitignore # Git ignore +├── index.ts # Maths solver agent implementation +├── package.json # Node.js package configuration and dependencies +└── readme.md # Project documentation +``` diff --git a/maths-solver-agent/tsconfig.json b/maths-solver-agent/tsconfig.json new file mode 100644 index 0000000..45f9a18 --- /dev/null +++ b/maths-solver-agent/tsconfig.json @@ -0,0 +1,12 @@ +{ + "compilerOptions": { + "target": "es2016", + "module": "commonjs", + "rootDir": "./", + "outDir": "./dist", + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "strict": true, + "skipLibCheck": true + } +} \ No newline at end of file diff --git a/meeting-prep-agent/.env.example b/meeting-prep-agent/.env.example new file mode 100644 index 0000000..48c8f10 --- /dev/null +++ b/meeting-prep-agent/.env.example @@ -0,0 +1,3 @@ +LANGBASE_API_KEY= your-langbase-api-key +OPENAI_API_KEY= your-openai-api-key +EXA_API_KEY= your-exa-api-key \ No newline at end of file diff --git a/meeting-prep-agent/index.ts b/meeting-prep-agent/index.ts new file mode 100644 index 0000000..15321a7 --- /dev/null +++ b/meeting-prep-agent/index.ts @@ -0,0 +1,352 @@ +import { Langbase, Workflow } from 'langbase'; +import { z } from 'zod'; +import { zodToJsonSchema } from 'zod-to-json-schema'; +import dotenv from 'dotenv'; + +dotenv.config(); + +async function meetingPrepWorkflow(input: string) { + const langbase = new Langbase({ + apiKey: process.env.LANGBASE_API_KEY! + }); + + const { step } = new Workflow({ + debug: true + }); + + // Step 1: Parse meeting details + const meetingDetails = await step({ + id: 'parse_meeting_details', + run: async () => { + // Define schema for meeting details + const meetingDetailsSchema = z.object({ + meetingPurpose: z.string(), + attendees: z.array( + z.object({ + name: z.string(), + role: z.string(), + company: z.string() + }) + ), + scheduledDate: z.string(), + duration: z.string(), + additionalContext: z.string() + }); + + const jsonSchema = zodToJsonSchema(meetingDetailsSchema, { + target: 'openAi' + }); + + const response = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Extract structured meeting details from the user input. Include the meeting purpose, attendees (with their names, roles, and companies), scheduled date, duration, and any additional context provided.', + input: [{ role: 'user', content: input }], + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'MeetingDetails', + schema: jsonSchema, + strict: true + } + } + }); + + return JSON.parse(response.output); + } + }); + + // Step 2: Search for company and attendee information + const companyInfo = await step({ + id: 'search_company_info', + run: async () => { + const companies = [ + ...new Set(meetingDetails.attendees.map((a: any) => a.company)) + ]; + const searchResults = []; + + for (const company of companies) { + if (company === 'Our Company') continue; // Skip searching for user's own company + + const query = `${company} company overview recent news`; + const results = await langbase.tools.webSearch({ + service: 'exa', + query, + totalResults: 3, + apiKey: process.env.EXA_API_KEY! 
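+					// Results for this company are pushed below and analysed per company in Step 3.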
+ }); + + searchResults.push({ + company, + results + }); + } + + return searchResults; + } + }); + + // Step 3: Generate company analysis + const companyAnalysis = await step({ + id: 'generate_company_analysis', + run: async () => { + const analysisPromises = companyInfo.map(async info => { + const companyContext = info.results + .map(r => `URL: ${r.url}\n${r.content}`) + .join('\n\n'); + + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Based on the provided information, create a concise company analysis that includes: 1) Company Overview, 2) Recent Developments, 3) Market Position, and 4) Potential Interests/Pain Points. Keep the analysis factual and objective.', + input: [ + { + role: 'user', + content: `Company: ${info.company}\n\nInformation:\n${companyContext}` + } + ], + stream: false + }); + + return { + company: info.company, + analysis: output + }; + }); + + return await Promise.all(analysisPromises); + } + }); + + // Step 4: Generate attendee profiles + const attendeeProfiles = await step({ + id: 'generate_attendee_profiles', + run: async () => { + const attendeePromises = meetingDetails.attendees.map( + async (attendee: any) => { + // Skip detailed analysis for internal team members + if (attendee.company === 'Our Company') { + return { + name: attendee.name, + role: attendee.role, + company: attendee.company, + profile: 'Internal team member' + }; + } + + // Find company analysis for this attendee + const companyData = companyAnalysis.find( + c => c.company === attendee.company + ); + const companyContext = companyData + ? companyData.analysis + : ''; + + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Create a professional profile for this meeting attendee based on their role and company information. Include: 1) Professional Background, 2) Likely Priorities/Interests, 3) Potential Questions They Might Ask, and 4) Recommended Approach for Interaction.', + input: [ + { + role: 'user', + content: `Attendee: ${attendee.name}\nRole: ${attendee.role}\nCompany: ${attendee.company}\n\nCompany Context:\n${companyContext}` + } + ], + stream: false + }); + + return { + name: attendee.name, + role: attendee.role, + company: attendee.company, + profile: output + }; + } + ); + + return await Promise.all(attendeePromises); + } + }); + + // Step 5: Generate meeting strategy + const meetingStrategy = await step({ + id: 'generate_meeting_strategy', + run: async () => { + const attendeeInfo = attendeeProfiles + .map(a => `${a.name} (${a.role}, ${a.company}): ${a.profile}`) + .join('\n\n'); + + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Create a comprehensive meeting strategy based on the meeting purpose and attendee profiles. 
Include: 1) Key Objectives, 2) Agenda Recommendations, 3) Discussion Points, 4) Potential Challenges and Mitigations, and 5) Success Criteria.', + input: [ + { + role: 'user', + content: `Meeting Purpose: ${meetingDetails.meetingPurpose}\nScheduled Date: ${meetingDetails.scheduledDate}\nDuration: ${meetingDetails.duration}\n\nAttendee Profiles:\n${attendeeInfo}\n\nAdditional Context: ${meetingDetails.additionalContext}` + } + ], + stream: false + }); + + return output; + } + }); + + // Step 6: Generate talking points and Q&A + const talkingPoints = await step({ + id: 'generate_talking_points', + run: async () => { + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Create a list of key talking points and prepare answers for potential questions. Format as: 1) Key Talking Points (bullet points), 2) Anticipated Questions and Suggested Responses (Q&A format).', + input: [ + { + role: 'user', + content: `Meeting Purpose: ${meetingDetails.meetingPurpose}\nMeeting Strategy: ${meetingStrategy}\nAttendees: ${meetingDetails.attendees.map((a: any) => `${a.name} (${a.role}, ${a.company})`).join(', ')}\nAdditional Context: ${meetingDetails.additionalContext}` + } + ], + stream: false + }); + + return output; + } + }); + + // Step 7: Generate executive summary + const executiveSummary = await step({ + id: 'generate_executive_summary', + run: async () => { + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Create a concise executive summary of the meeting preparation. This should be a brief overview that can be quickly reviewed before entering the meeting. Keep it under 300 words.', + input: [ + { + role: 'user', + content: `Meeting Purpose: ${meetingDetails.meetingPurpose}\nAttendees: ${meetingDetails.attendees.map((a: any) => `${a.name} (${a.role}, ${a.company})`).join(', ')}\nMeeting Strategy: ${meetingStrategy}\nTalking Points: ${talkingPoints}` + } + ], + stream: false + }); + + return output; + } + }); + + // Step 8: Compile final briefing document + const finalBriefing = await step({ + id: 'compile_final_briefing', + run: async () => { + const briefingSchema = z.object({ + title: z.string(), + executiveSummary: z.string(), + meetingDetails: z.object({ + purpose: z.string(), + date: z.string(), + duration: z.string(), + attendees: z.array( + z.object({ + name: z.string(), + role: z.string(), + company: z.string() + }) + ) + }), + companyAnalysis: z.array( + z.object({ + company: z.string(), + analysis: z.string() + }) + ), + attendeeProfiles: z.array( + z.object({ + name: z.string(), + role: z.string(), + company: z.string(), + profile: z.string() + }) + ), + meetingStrategy: z.string(), + talkingPoints: z.string(), + nextSteps: z.string() + }); + + const jsonSchema = zodToJsonSchema(briefingSchema, { + target: 'openAi' + }); + + const response = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Compile all the meeting preparation information into a structured briefing document. 
Include a title, executive summary, meeting details, company analysis, attendee profiles, meeting strategy, talking points, and recommended next steps after the meeting.', + input: [ + { + role: 'user', + content: `Meeting Purpose: ${meetingDetails.meetingPurpose} +Scheduled Date: ${meetingDetails.scheduledDate} +Duration: ${meetingDetails.duration} +Attendees: ${JSON.stringify(meetingDetails.attendees)} +Additional Context: ${meetingDetails.additionalContext} +Executive Summary: ${executiveSummary} +Company Analysis: ${JSON.stringify(companyAnalysis)} +Attendee Profiles: ${JSON.stringify(attendeeProfiles)} +Meeting Strategy: ${meetingStrategy} +Talking Points: ${talkingPoints}` + } + ], + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'MeetingBriefing', + schema: jsonSchema, + strict: true + } + } + }); + + return JSON.parse(response.output); + } + }); + + // Format the final output as a readable document + const formattedBriefing = await step({ + id: 'format_briefing', + run: async () => { + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Format the meeting briefing document into a well-structured, professional markdown document that is easy to read and navigate. Use appropriate headings, bullet points, and formatting to make the information clear and accessible.', + input: [ + { role: 'user', content: JSON.stringify(finalBriefing) } + ], + stream: false + }); + + return output; + } + }); + + return formattedBriefing; +} + +async function main() { + const input = + 'We are meeting with Ahmad from Langbase. He is the CEO of the company and we are meeting to discuss a new partnership. We are meeting on 2025-05-22 at 10:00 AM for 1 hour. The attendees are Ahmad, Jane Smith from Google, and Jim Beam from Google. The additional context is that we are meeting to discuss a new partnership and we are meeting to discuss a new partnership. The additional context is that we are meeting to discuss a new partnership and we are meeting to discuss a new partnership.'; + console.log('📅 Meeting Topic:', input); + const result = await meetingPrepWorkflow(input); + return result; +} + +main(); diff --git a/meeting-prep-agent/package.json b/meeting-prep-agent/package.json new file mode 100644 index 0000000..f2e0d49 --- /dev/null +++ b/meeting-prep-agent/package.json @@ -0,0 +1,19 @@ +{ + "name": "meeting-prep-agent", + "version": "1.0.0", + "main": "index.js", + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "dependencies": { + "dotenv": "^16.4.7", + "langbase": "^1.1.55", + "zod": "^3.25.20", + "zod-to-json-schema": "^3.24.5" + }, + "devDependencies": { + "@types/node": "^22.15.2", + "tsx": "^4.19.1" + } +} \ No newline at end of file diff --git a/meeting-prep-agent/readme.md b/meeting-prep-agent/readme.md new file mode 100644 index 0000000..2a1c7a1 --- /dev/null +++ b/meeting-prep-agent/readme.md @@ -0,0 +1,69 @@ +## Meeting Prep Agent + +Meeting Prep Agent is a Node.js tool using [CHAI](https://chai.new) OpenAI and Exa that helps you prepare for meetings by gathering and organizing information from the web and providing insights and strategies for the meeting. + +## Features + + - **Comprehensive Meeting Preparation**: Uses multiple AI agents to gather and organize information for meetings. + - **Web Search**: Integrates Exa to search the web for relevant information. 
+ - **Insightful Analysis**: Provides context analysis, industry insights, and meeting strategies. + +## Get started + +Let's get started with the project: + +1. To get started with Langbase, you'll need to [create a free personal account on Langbase.com][signup] and verify your email address. Done? Cool, cool! + +2. Download and setup the project + + The following command: + + - Downloads the example project folder from [here](https://download-directory.github.io/?url=https://github.com/LangbaseInc/awesome-agents/tree/main/meeting-prep-agent) + - Renames the folder to meeting-prep-agent + - Changes the directory to the project folder + - Copies the .env.example file to .env in the project folder + + ```bash + npx degit LangbaseInc/awesome-agents/meeting-prep-agent meeting-prep-agent && + cd meeting-prep-agent && + cp .env.example .env + ``` + +3. Add the values of these environment variables to the .env file: + + ```plaintext + # Get your org or user API key that can be used to access everything with Langbase. + # https://langbase.com/docs/api-reference/api-keys + LANGBASE_API_KEY="your-langbase-api-key" + OPENAI_API_KEY="your-openai-api-key" + EXA_API_KEY="your-exa-api-key" + ``` + +4. Install dependencies: + + ```bash + pnpm install + + # OR + npm install + ``` + +5. Run the journalist-agent: + + ```bash + pnpm dlx tsx index.ts + + # OR + npx tsx index.ts + ``` + +## Project Structure + +``` +meeting-prep-agent/ +├── .env.example # Environment variables example +├── .gitignore # Git ignore +├── index.ts # Meeting prep agent implementation +├── package.json # Node.js package configuration and dependencies +└── readme.md # Project documentation +``` diff --git a/meeting-prep-agent/tsconfig.json b/meeting-prep-agent/tsconfig.json new file mode 100644 index 0000000..45f9a18 --- /dev/null +++ b/meeting-prep-agent/tsconfig.json @@ -0,0 +1,12 @@ +{ + "compilerOptions": { + "target": "es2016", + "module": "commonjs", + "rootDir": "./", + "outDir": "./dist", + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "strict": true, + "skipLibCheck": true + } +} \ No newline at end of file diff --git a/news-agent/readme.md b/news-agent/readme.md index d556ea7..3969e7d 100644 --- a/news-agent/readme.md +++ b/news-agent/readme.md @@ -1,15 +1,12 @@ ## News Agent -News Agent is an agent in Node.js built with Langbase, designed to assist in summarizing the latest news in a city. It leverages the following core functionalities: - -- News Retrieval -- News Summarization +News Agent is an agent in Node.js built with Langbase, designed to assist in summarizing the latest news in a city. ## Features -- 📰 **News Analysis** - - **News Retrieval**: Fetches the top news stories from a specified city using the Exa service. - - **News Summarization**: Summarizes the retrieved news stories to provide a concise overview. + - 🔎 **News Retrieval**: Fetches the top news stories from a specified city using the Exa service. + - 📰 **News Analysis**: Analyzes the news and provides a summary of the top stories. + - 📝 **News Summarization**: Summarizes the retrieved news stories to provide a concise overview. 
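+Under the hood this maps to two SDK calls. Below is a minimal sketch of that flow, assuming the same `langbase.tools.webSearch` (Exa) and `langbase.agent.run` calls used by the other agents in this repo; the model name and city are illustrative.
+
+```ts
+import { Langbase } from 'langbase';
+
+const langbase = new Langbase({ apiKey: process.env.LANGBASE_API_KEY! });
+
+async function summarizeCityNews(city: string) {
+	// Fetch top stories for the city with Exa web search
+	const stories = await langbase.tools.webSearch({
+		service: 'exa',
+		query: `top news stories in ${city}`,
+		totalResults: 5,
+		apiKey: process.env.EXA_API_KEY!
+	});
+
+	// Summarize the retrieved stories into a concise overview
+	const { output } = await langbase.agent.run({
+		model: 'openai:gpt-4.1-mini',
+		apiKey: process.env.OPENAI_API_KEY!,
+		instructions: 'Summarize the top news stories for the given city.',
+		input: [
+			{
+				role: 'user',
+				content: stories.map(s => `Source: ${s.url}\n${s.content}`).join('\n\n')
+			}
+		],
+		stream: false
+	});
+
+	return output;
+}
+```
+
+Keeping retrieval and summarization as separate steps makes each piece easy to swap or test independently.
+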
## Get started diff --git a/nutrition-planner-agent/readme.md b/nutrition-planner-agent/readme.md index 55d9ea4..1dca99b 100644 --- a/nutrition-planner-agent/readme.md +++ b/nutrition-planner-agent/readme.md @@ -1,19 +1,13 @@ ## Nutrition Planner Agent -Nutrition Planner Agent is an agent in Node.js built with Langbase, designed to assist in creating personalized diet plans based on user preferences and goals. It leverages the following core functionalities: - -- Nutritional Analysis -- Personalized Diet Planning +Nutrition Planner Agent is an agent in Node.js built with Langbase, designed to assist in creating personalized diet plans based on user preferences and goals. ## Features -- 🍽️ **Diet Planning** - - - **Nutritional Needs Assessment**: Analyzes user information to determine caloric and nutritional requirements. - - **Meal Scheduling**: Plans meals according to user-defined timings and dietary goals. - -- 🌍 **Regional Diet Customization** - - **Ingredient Availability**: Considers regional availability of ingredients to ensure practical meal planning. +- 🍽️ **Diet Planning**: Plans meals according to user-defined timings and dietary goals. +- 🍽️ **Ingredient Availability**: Considers regional availability of ingredients to ensure practical meal planning. +- 🍽️ **Meal Scheduling**: Plans meals according to user-defined timings and dietary goals. +- 🍽️ **Nutritional Needs**: Analyzes user information to determine caloric and nutritional requirements. ## Get started diff --git a/package.json b/package.json index 8e64ad6..9e45c23 100644 --- a/package.json +++ b/package.json @@ -4,8 +4,8 @@ "version": "1.0.0", "main": "index.js", "scripts": { - "prettier-fix": "prettier --write \"**/*.{js,ts,tsx,md,mdx}\"", - "prettier-check": "prettier --check \"**/*.{js,ts,tsx,md,mdx}\"" + "format": "prettier --write \"**/*.{js,ts,tsx,md,mdx}\"", + "format-check": "prettier --check \"**/*.{js,ts,tsx,md,mdx}\"" }, "keywords": [], "author": "", diff --git a/programming-language-tutor-agent/readme.md b/programming-language-tutor-agent/readme.md index b32639e..664ccc7 100644 --- a/programming-language-tutor-agent/readme.md +++ b/programming-language-tutor-agent/readme.md @@ -1,10 +1,6 @@ ## Programming Language Tutor Agent -Programming Language Tutor Agent is an agent in Node.js built with Langbase, designed to assist in learning programming languages by breaking down topics into smaller, distinct subtasks. It leverages the following core functionalities: - -- Topic Analysis -- Subtask Generation -- Knowledge Synthesis +Programming Language Tutor Agent is an agent in Node.js built with Langbase, designed to assist in learning programming languages by breaking down topics into smaller, distinct subtasks. ## Features diff --git a/readme.md b/readme.md index 90da4e9..d2a624c 100644 --- a/readme.md +++ b/readme.md @@ -12,7 +12,6 @@ Langbase - ## AI Agents - [Day Planner Agent](https://github.com/LangbaseInc/awesome-agents/tree/main/day-planner-agent) - Day planner agent schedules tasks based on user input. diff --git a/recipe-maker-agent/readme.md b/recipe-maker-agent/readme.md index b275855..3b94233 100644 --- a/recipe-maker-agent/readme.md +++ b/recipe-maker-agent/readme.md @@ -2,10 +2,6 @@ Recipe Maker Agent is an agent in Node.js built with Langbase, designed to assist in creating personalized recipes based on user preferences and available ingredients. 
It leverages the following core functionalities: -- Recipe Generation -- Ingredient Substitution -- Cooking Guidance - ## Features - 🍲 **Recipe Creation** diff --git a/resume-maker-agent/readme.md b/resume-maker-agent/readme.md index df59a13..213ee4d 100644 --- a/resume-maker-agent/readme.md +++ b/resume-maker-agent/readme.md @@ -1,10 +1,6 @@ ## Resume Maker Agent -Resume Maker Agent is an agent in Node.js built with Langbase, designed to assist in crafting professional and effective resumes based on user input. It leverages the following core functionalities: - -- Resume Crafting -- Information Gathering -- ATS Optimization +Resume Maker Agent is an agent in Node.js built with Langbase, designed to assist in crafting professional and effective resumes based on user input. ## Features diff --git a/stock-analyser-agent/.env.example b/stock-analyser-agent/.env.example new file mode 100644 index 0000000..efdf539 --- /dev/null +++ b/stock-analyser-agent/.env.example @@ -0,0 +1,2 @@ +LANGBASE_API_KEY= your-langbase-api-key +OPENAI_API_KEY= your-openai-api-key \ No newline at end of file diff --git a/stock-analyser-agent/index.ts b/stock-analyser-agent/index.ts new file mode 100644 index 0000000..080e5bf --- /dev/null +++ b/stock-analyser-agent/index.ts @@ -0,0 +1,172 @@ +import { Langbase, Workflow } from 'langbase'; +import { z } from 'zod'; +import { zodToJsonSchema } from 'zod-to-json-schema'; +import dotenv from 'dotenv'; + +dotenv.config(); + +async function investmentAgentWorkflow(input: string) { + const langbase = new Langbase({ + apiKey: process.env.LANGBASE_API_KEY! + }); + + const { step } = new Workflow({ + debug: true + }); + + // Step 1: Parse the stock symbols from user input + const stockSymbols = await step({ + id: 'parse_stock_symbols', + run: async () => { + // Define schema using zod + const stockSymbolsSchema = z.object({ + symbol1: z.string(), + symbol2: z.string(), + valid: z.boolean(), + message: z.string() + }); + + // Convert to JSON schema + const jsonSchema = zodToJsonSchema(stockSymbolsSchema, { + target: 'openAi' + }); + + const response = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + "Extract the stock symbols from the user's input. If the user doesn't provide two stock symbols, set valid to false and include a message asking for clarification. If you can identify two stock symbols, set valid to true.", + input: [{ role: 'user', content: input }], + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'StockSymbols', + schema: jsonSchema, + strict: true + } + } + }); + + return JSON.parse(response.output); + } + }); + + // If the input doesn't contain valid stock symbols, return a message asking for clarification + if (!stockSymbols.valid) { + return stockSymbols.message; + } + + // Step 2: Retrieve stock data for the first symbol + const stock1Data = await step({ + id: 'retrieve_stock1_data', + run: async () => { + const results = await langbase.tools.webSearch({ + service: 'exa', + query: `${stockSymbols.symbol1} stock performance financial data price earnings ratio market cap`, + totalResults: 3, + apiKey: process.env.EXA_API_KEY! 
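+				// Note: EXA_API_KEY must be set in .env for this web search (see readme step 3);
+				// it is not listed in this project's .env.example.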
+ }); + + return results; + } + }); + + // Step 3: Retrieve stock data for the second symbol + const stock2Data = await step({ + id: 'retrieve_stock2_data', + run: async () => { + const results = await langbase.tools.webSearch({ + service: 'exa', + query: `${stockSymbols.symbol2} stock performance financial data price earnings ratio market cap`, + totalResults: 3, + apiKey: process.env.EXA_API_KEY! + }); + + return results; + } + }); + + // Step 4: Retrieve latest news for both stocks + const stockNews = await step({ + id: 'retrieve_stock_news', + run: async () => { + const results = await langbase.tools.webSearch({ + service: 'exa', + query: `latest news ${stockSymbols.symbol1} ${stockSymbols.symbol2} stock market analyst recommendations`, + totalResults: 4, + apiKey: process.env.EXA_API_KEY! + }); + + return results; + } + }); + + // Step 5: Generate the investment report + const investmentReport = await step({ + id: 'generate_investment_report', + run: async () => { + // Prepare context from search results + const stock1Context = stock1Data + .map(item => `URL: ${item.url}\nContent: ${item.content}`) + .join('\n\n'); + const stock2Context = stock2Data + .map(item => `URL: ${item.url}\nContent: ${item.content}`) + .join('\n\n'); + const newsContext = stockNews + .map(item => `URL: ${item.url}\nContent: ${item.content}`) + .join('\n\n'); + + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: ` + You are an expert financial analyst. Create a comprehensive investment report comparing ${stockSymbols.symbol1} and ${stockSymbols.symbol2}. + + Use the provided data to analyze: + 1. Current stock prices and recent performance + 2. Key financial metrics (P/E ratio, market cap, revenue growth, etc.) + 3. Recent news and analyst recommendations + 4. Strengths and weaknesses of each company + 5. Comparative analysis between the two stocks + 6. Investment recommendation with rationale + + Format the report professionally with clear sections and bullet points where appropriate. + Always cite your sources when providing specific data points. + Be balanced in your analysis, highlighting both positive and negative aspects. + `, + input: [ + { + role: 'user', + content: ` + I need a detailed comparison between ${stockSymbols.symbol1} and ${stockSymbols.symbol2}. 
+ + Here's the data for ${stockSymbols.symbol1}: + ${stock1Context} + + Here's the data for ${stockSymbols.symbol2}: + ${stock2Context} + + Recent news and analyst recommendations: + ${newsContext} + ` + } + ], + stream: false + }); + + return output; + } + }); + + return investmentReport; +} + +async function main() { + const query = 'I want to invest in Apple and Google stocks'; + console.log('🔍 Query:', query); + const result = await investmentAgentWorkflow(query); + return result; +} + +main(); diff --git a/stock-analyser-agent/package.json b/stock-analyser-agent/package.json new file mode 100644 index 0000000..c6869c3 --- /dev/null +++ b/stock-analyser-agent/package.json @@ -0,0 +1,18 @@ +{ + "name": "stock-analyser-agent", + "version": "1.0.0", + "main": "index.ts", + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "dependencies": { + "dotenv": "^16.4.7", + "langbase": "^1.1.55", + "zod": "^3.25.20", + "zod-to-json-schema": "^3.24.5" + }, + "devDependencies": { + "@types/node": "^22.15.2" + } +} diff --git a/stock-analyser-agent/readme.md b/stock-analyser-agent/readme.md new file mode 100644 index 0000000..a9dc2ab --- /dev/null +++ b/stock-analyser-agent/readme.md @@ -0,0 +1,76 @@ +## Stock Analyser Agent + +Stock Analyser Agent is an agent in Node.js built with [CHAI](https://chai.new/), designed to that compares the performance of two stocks and generates detailed reports. + +## Features + +- 📈 **Stock Performance Comparison** + + - **Compare Performance**: Compares the performance of two stocks and generates detailed reports. + +- 🏢 **Comprehensive Company Information** + + - **Retrieve Information**: Retrieves comprehensive company information for informed analysis. + +- 📰 **Latest News and Recommendations** + - **News and Recommendations**: Gets the latest company news and analyst recommendations for both stocks. + +## Get started + +Let's get started with the project: + +1. To get started with Langbase, you'll need to [create a free personal account on Langbase.com][signup] and verify your email address. Done? Cool, cool! + +2. Download and setup the project + + The following command: + + - Downloads the example project folder from [here](https://download-directory.github.io/?url=https://github.com/LangbaseInc/awesome-agents/tree/main/stock-analyser-agent) + - Renames the folder to stock-analyser-agent + - Changes the directory to the project folder + - Copies the .env.example file to .env in the project folder + + ```bash + npx degit LangbaseInc/awesome-agents/stock-analyser-agent stock-analyser-agent && + cd stock-analyser-agent && + cp .env.example .env + ``` + +3. Add the values of these environment variables to the .env file: + + ```plaintext + # Get your org or user API key that can be used to access everything with Langbase. + # https://langbase.com/docs/api-reference/api-keys + LANGBASE_API_KEY="your-langbase-api-key" + OPENAI_API_KEY="your-openai-api-key" + EXA_API_KEY="your-exa-api-key" + ``` + +4. Install dependencies: + + ```bash + pnpm install + + # OR + npm install + ``` + +5. 
Run the stock-analyser-agent: + + ```bash + pnpm dlx tsx index.ts + + # OR + npx tsx index.ts + ``` + +## Project Structure + +``` +stock-analyser-agent/ +├── .env.example # Environment variables example +├── .gitignore # Git ignore +├── index.ts # Stock analyser agent implementation +├── package.json # Node.js package configuration and dependencies +└── readme.md # Project documentation +``` diff --git a/stock-analyser-agent/tsconfig.json b/stock-analyser-agent/tsconfig.json new file mode 100644 index 0000000..6feaed6 --- /dev/null +++ b/stock-analyser-agent/tsconfig.json @@ -0,0 +1,12 @@ +{ + "compilerOptions": { + "target": "es2016", + "module": "commonjs", + "rootDir": "./", + "outDir": "./dist", + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "strict": true, + "skipLibCheck": true + } +} diff --git a/support-agent/.env.example b/support-agent/.env.example new file mode 100644 index 0000000..efdf539 --- /dev/null +++ b/support-agent/.env.example @@ -0,0 +1,2 @@ +LANGBASE_API_KEY= your-langbase-api-key +OPENAI_API_KEY= your-openai-api-key \ No newline at end of file diff --git a/support-agent/index.ts b/support-agent/index.ts new file mode 100644 index 0000000..7130315 --- /dev/null +++ b/support-agent/index.ts @@ -0,0 +1,209 @@ +import { Langbase, Workflow } from 'langbase'; +import { z } from 'zod'; +import { zodToJsonSchema } from 'zod-to-json-schema'; +import fs from 'fs'; +import dotenv from 'dotenv'; + +dotenv.config(); + +async function supportAgent(input: string) { + const langbase = new Langbase({ + apiKey: process.env.LANGBASE_API_KEY! + }); + + const { step } = new Workflow({ + debug: true + }); + + // Step 1: Analyze the query to optimize retrieval + const queryAnalysis = await step({ + id: 'analyze_query', + run: async () => { + const queryAnalysisSchema = z.object({ + searchQuery: z.string(), + keywords: z.array(z.string()), + expectedDocumentTypes: z.array(z.string()), + reasoning: z.string() + }); + + const queryAnalysisJsonSchema = zodToJsonSchema( + queryAnalysisSchema, + { target: 'openAi' } + ); + + const response = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + 'Analyze the user query to create an optimized search query for document retrieval. Extract key concepts and terms that would help find the most relevant documentation. 
Identify what types of documents would likely contain the answer.', + input: [{ role: 'user', content: input }], + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'QueryAnalysis', + schema: queryAnalysisJsonSchema, + strict: true + } + } + }); + + return JSON.parse(response.output); + } + }); + + // Step 2: Retrieve information from documentation + const docsContext = await step({ + id: 'retrieve_from_docs', + run: async () => { + const results = await langbase.memories.retrieve({ + query: queryAnalysis.searchQuery, + memory: [{ name: 'support-docs-1747874110568' }] + }); + + return { results }; + } + }); + + // Step 3: Evaluate the quality of retrieved information + const contextEvaluation = await step({ + id: 'evaluate_context', + run: async () => { + if (docsContext.results.length === 0) { + return { + hasRelevantInfo: false, + missingInfo: 'No relevant documentation found.', + confidenceScore: 0 + }; + } + + const contextEvalSchema = z.object({ + hasRelevantInfo: z.boolean(), + missingInfo: z.string(), + confidenceScore: z.number().min(0).max(10), + mostRelevantSections: z.array(z.number()), + reasoning: z.string() + }); + + const contextEvalJsonSchema = zodToJsonSchema(contextEvalSchema, { + target: 'openAi' + }); + + const contextText = docsContext.results + .map((doc, index) => `[Section ${index + 1}]\n${doc.text}`) + .join('\n\n'); + + const response = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: + "Evaluate the retrieved documentation for relevance to the user's query. Determine if it contains the information needed to answer the question completely. Identify the most relevant sections by their numbers.", + input: [ + { + role: 'user', + content: `User Query: ${input}\n\nRetrieved Documentation:\n${contextText}` + } + ], + stream: false, + response_format: { + type: 'json_schema', + json_schema: { + name: 'ContextEvaluation', + schema: contextEvalJsonSchema, + strict: true + } + } + }); + + return JSON.parse(response.output); + } + }); + + // Step 4: Generate comprehensive answer + const answer = await step({ + id: 'generate_answer', + run: async () => { + // Filter to most relevant sections if available + let relevantContext = docsContext.results; + if ( + contextEvaluation.hasRelevantInfo && + contextEvaluation.mostRelevantSections.length > 0 + ) { + relevantContext = contextEvaluation.mostRelevantSections + .map((idx: number) => docsContext.results[idx - 1]) + .filter(Boolean); + } + + const contextText = relevantContext + .map(doc => doc.text) + .join('\n\n'); + + const { output } = await langbase.agent.run({ + model: 'openai:gpt-4.1-mini', + apiKey: process.env.OPENAI_API_KEY!, + instructions: `You are a helpful support agent. Answer the user's question based ONLY on the provided documentation. + + If the documentation doesn't contain enough information to answer the question completely, acknowledge the limitations and provide the best partial answer possible based on what's available. + + Do not make up information or refer to external sources. Only use what's in the provided documentation. + + Format your answer clearly with appropriate headings, bullet points, or numbered lists when helpful. 
+ + Documentation Context: + ${contextText}`, + input: [{ role: 'user', content: input }], + stream: false + }); + + return { + answer: output, + confidence: contextEvaluation.confidenceScore, + hasRelevantInfo: contextEvaluation.hasRelevantInfo, + missingInfo: contextEvaluation.missingInfo + }; + } + }); + + return answer; +} + +async function main() { + const query = 'What is AI Studio in Langbase?'; + console.log('🔍 Query:', query); + await setupMemory(); + const result = await supportAgent(query); + return result; +} + +main(); + +async function setupMemory() { + const langbase = new Langbase({ + apiKey: process.env.LANGBASE_API_KEY! + }); + + const memoryList = await langbase.memories.list(); + const isMemoryExists = memoryList.find( + memory => memory.name === 'support-docs-1747874110568' + ); + + if (!isMemoryExists) { + await langbase.memories.create({ + name: 'support-docs-1747874110568', + description: 'Support documents for the user query', + embedding_model: 'openai:text-embedding-3-large', + top_k: 10, + chunk_size: 1000, + chunk_overlap: 200 + }); + + // Example: Upload a PDF document + const textDocument = fs.readFileSync('./langbase-support-doc.txt'); + await langbase.memories.documents.upload({ + memoryName: 'support-docs-1747874110568', + documentName: 'langbase-support-doc.txt', + document: textDocument, + contentType: 'text/plain' + }); + } +} diff --git a/support-agent/langbase-support-doc.txt b/support-agent/langbase-support-doc.txt new file mode 100644 index 0000000..5d55172 --- /dev/null +++ b/support-agent/langbase-support-doc.txt @@ -0,0 +1,457 @@ +# Langbase: The Serverless AI Developer Platform + +Langbase is the most powerful serverless AI platform for building AI agents with memory. This comprehensive guide will help you understand how to leverage Langbase to build, deploy, and scale AI agents with tools and memory (RAG) capabilities. Compared to complex AI frameworks, Langbase offers a composable, serverless approach with a world-class developer experience that requires no bloated frameworks. + +## Core Components of Langbase + +### AI Pipes (Serverless Agents) + +Pipes are serverless AI agents with agentic tools and memory that can be deployed as easily as a website[1]. These high-level layers to Large Language Models (LLMs) create personalized AI assistants for your queries[4]. Pipes can leverage any of the 250+ supported LLM models and various tools to deliver powerful AI experiences[1]. + +Pipe agents have two primary types: +- **generate**: Used for one-off generations and transformations +- **chat**: Used for conversational interfaces[4] + +### AI Memory (Serverless RAG) + +Memory agents are Langbase's solution for semantic retrieval-augmented generation (RAG), designed as a serverless and infinitely scalable API[1]. Memory provides: + +- 30-50x cost reduction compared to competitors +- Industry-leading accuracy with advanced agentic routing +- Intelligent reranking +- Multi-tenant design supporting millions of memory RAG stores +- Per-user or per-use-case memory capabilities[1] + +### AI Studio (Dev Platform) + +Langbase Studio serves as your playground to build, collaborate, and deploy AI agents[1]. 
It allows you to: + +- Experiment with pipes in real-time +- Collaborate with team members +- Store messages +- Version your prompts +- Deploy prototypes to production with predictions on usage, cost, and effectiveness[7] + +## Getting Started with Langbase + +### Authentication + +Langbase uses API keys for authentication at user, organization, or pipe-specific levels[2][5]. To authenticate: + +1. Create API keys from your Langbase account +2. Include your API key in HTTP headers: +``` +Authorization: Bearer LANGBASE_API_KEY +``` + +With the Langbase SDK, set your API key as follows: +```javascript +import {Langbase} from 'langbase'; +const langbase = new Langbase({ + apiKey: process.env.LANGBASE_API_KEY +}); +``` + +**Important**: Treat your API keys like passwords. Keep them secret and use them only on the server side[2][5]. + +### Installation + +You can install the Langbase SDK using npm: +``` +npm i langbase +``` + +For integrations using Langbase components: +``` +npm i langbase @langbase/components +``` + +## Building AI Agents with Pipes + +### Creating a Pipe + +1. Log in to your Langbase account +2. Go to `pipe.new` to create a new Pipe +3. Name your Pipe and choose the type (generate or chat) +4. Click on the `[Create Pipe]` button[7] + +### Running and Experimenting with Pipes + +Langbase AI Studio provides a playground to run and experiment with your Pipes: + +1. Configure LLM API keys for your selected model +2. For generate-type Pipes, simply run them +3. For chat-type Pipes, input a message in the Playground and run the Pipe[7] + +### Using Pipes via API + +After configuring your Pipe, you can use it via the API: + +```bash +curl https://api.langbase.com/generate \ +-H 'Content-Type: application/json' \ +-H 'Authorization: Bearer ' \ +-d '{ + "messages": [{ + "role": "user", + "content": "Make the titles less wordy and more engaging" + }] +}' +``` + +You can also pass dynamic variables to your prompts: + +```bash +curl https://api.langbase.com/generate \ +-H 'Content-Type: application/json' \ +-H 'Authorization: Bearer ' \ +-d '{ + "messages": [{ + "role": "user", + "content": "Make the titles less wordy and more engaging" + }], + "variables": [{ + "name": "topic", + "value": "Building AI with Langbase" + }] +}' +``` + +## Using Memory for RAG + +### Creating a Memory + +1. Log in to your Langbase account +2. Click on the "Memory" tab in the left sidebar +3. Click "Add New" and name your memory +4. Click "Create" to generate the memory[13] + +### Adding Documents to Memory + +1. Prepare documents you want to add to memory (text files, etc.) +2. Open your created memory +3. Upload documents by clicking on the upload area or using drag & drop +4. Wait for documents to be processed (status will change from "Queued" to "Processing" to "Ready")[13][17] + +**Note**: Memory requires an OpenAI API key for processing documents[13]. + +### Document Processing Stages + +When a document is uploaded to Memory, it undergoes several processing stages: + +1. **Chunking**: The document is split into manageable chunks +2. **Embedding**: Each chunk is converted into an embedding +3. **Indexing**: Embeddings are stored in a vector store and indexed for faster retrieval[17] + +### Connecting Memory to Pipe + +1. Open an existing Pipe or create a new one +2. Click on the "Memory" button in the Pipe editor +3. Select your memory from the "Search Memory Sets" dropdown +4. 
Your Pipe is now RAG-enabled with the selected memory[13] + +## Agent Architectures + +Langbase supports various agent architectures that you can implement: + +- **Augmented LLM**: The fundamental component enhanced with retrieval, tools, and memory +- **Prompt chaining**: Connecting multiple prompts in sequence +- **Agentic Routing**: Directing queries to appropriate specialized agents +- **Agent Parallelization**: Running multiple agents in parallel +- **Orchestration workers**: Coordinating multiple agents +- **Evaluator-optimizer**: Agents that evaluate and improve outputs +- **Memory Agent**: Agents with semantic memory capabilities[3] + +## Next.js Integration + +For integrating Langbase with a Next.js application: + +1. **Install required packages**: + ``` + npm install langbase + ``` + Or with components: + ``` + npm i langbase @langbase/components + ``` + +2. **Create a Server Action**: + ```javascript + 'use server'; + import { Langbase } from "langbase"; + + export async function runAgent(input: string) { + const langbase = new Langbase({ + apiKey: process.env.LANGBASE_API_KEY! + }); + + const response = await langbase.pipes.run({ + stream: false, + name: 'your-pipe-name', + messages: [{ role: 'user', content: input }], + }); + + return response.completion; + } + ``` + +3. **For chatbot implementation**, add the components CSS and import the chatbot: + ```javascript + // In layout + import '@langbase/components/styles' + + // In component + 'use client'; + import { Chatbot } from '@langbase/components'; + + + ``` + +## Building a Docs Agent + +The Langbase Docs Agent transforms your documentation into an intelligent, conversational AI assistant: + +1. **Upload your data**: Create a memory agent powered by your docs, either through manual uploads or Git repository syncing +2. **Train your Docs Agent**: Set up and customize your AI Docs Agent as a Serverless Semantic RAG +3. **Integration**: Integrate the agent into your application using the ready-to-use chatbot component[8][16] + +Key features include: +- Trusted answers with source citations +- Automatic syncing with your documentation +- Customizable agent behavior +- Easy embedding and integration options +- Analytics for performance insights[16] + +## Conclusion + +Langbase offers a comprehensive, serverless AI platform for building, deploying, and scaling AI agents with memory. Its composable architecture allows developers of all skill levels to create powerful AI experiences without the complexity of traditional AI frameworks. By leveraging Pipes for agent functionality and Memory for RAG capabilities, developers can quickly build and deploy sophisticated AI solutions that integrate seamlessly with their existing applications. + +For more information and detailed documentation, visit [langbase.com/docs](https://langbase.com/docs) or join the Langbase community for support and collaboration.# Langbase: The Serverless AI Developer Platform + +Langbase is the most powerful serverless AI platform for building AI agents with memory. This comprehensive guide will help you understand how to leverage Langbase to build, deploy, and scale AI agents with tools and memory (RAG) capabilities. Compared to complex AI frameworks, Langbase offers a composable, serverless approach with a world-class developer experience that requires no bloated frameworks. + +## Core Components of Langbase + +### AI Pipes (Serverless Agents) + +Pipes are serverless AI agents with agentic tools and memory that can be deployed as easily as a website[1]. 
+
+## Next.js Integration
+
+For integrating Langbase with a Next.js application:
+
+1. **Install required packages**:
+   ```
+   npm install langbase
+   ```
+   Or with components:
+   ```
+   npm i langbase @langbase/components
+   ```
+
+2. **Create a Server Action**:
+   ```javascript
+   'use server';
+   import { Langbase } from "langbase";
+
+   export async function runAgent(input: string) {
+     const langbase = new Langbase({
+       apiKey: process.env.LANGBASE_API_KEY!
+     });
+
+     const response = await langbase.pipes.run({
+       stream: false,
+       name: 'your-pipe-name',
+       messages: [{ role: 'user', content: input }],
+     });
+
+     return response.completion;
+   }
+   ```
+
+3. **For chatbot implementation**, add the components CSS and render the chatbot in a client component:
+   ```javascript
+   // In layout
+   import '@langbase/components/styles'
+
+   // In component
+   'use client';
+   import { Chatbot } from '@langbase/components';
+
+   <Chatbot />
+   ```
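+
+On the client side, the server action above can be called like any async function. A minimal sketch, assuming the action is exported from a file such as `app/actions.ts` (the path and component below are illustrative, not part of the official docs):
+
+```javascript
+'use client';
+import { useState } from 'react';
+import { runAgent } from './actions'; // hypothetical path to the server action above
+
+export default function AskAgent() {
+  const [answer, setAnswer] = useState('');
+
+  async function handleAsk() {
+    // Calls the server action, which runs the pipe on the server.
+    const completion = await runAgent('Summarize the latest product updates');
+    setAnswer(completion);
+  }
+
+  return (
+    <div>
+      <button onClick={handleAsk}>Ask the agent</button>
+      <p>{answer}</p>
+    </div>
+  );
+}
+```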
+
+## Building a Docs Agent
+
+The Langbase Docs Agent transforms your documentation into an intelligent, conversational AI assistant:
+
+1. **Upload your data**: Create a memory agent powered by your docs, either through manual uploads or Git repository syncing
+2. **Train your Docs Agent**: Set up and customize your AI Docs Agent as a Serverless Semantic RAG
+3. **Integration**: Integrate the agent into your application using the ready-to-use chatbot component[8][16]
+
+Key features include:
+- Trusted answers with source citations
+- Automatic syncing with your documentation
+- Customizable agent behavior
+- Easy embedding and integration options
+- Analytics for performance insights[16]
+
+## Conclusion
+
+Langbase offers a comprehensive, serverless AI platform for building, deploying, and scaling AI agents with memory. Its composable architecture allows developers of all skill levels to create powerful AI experiences without the complexity of traditional AI frameworks. By leveraging Pipes for agent functionality and Memory for RAG capabilities, developers can quickly build and deploy sophisticated AI solutions that integrate seamlessly with their existing applications.
+
+For more information and detailed documentation, visit [langbase.com/docs](https://langbase.com/docs) or join the Langbase community for support and collaboration.
\ No newline at end of file
diff --git a/support-agent/package.json
new file mode 100644
index 0000000..93a46a3
--- /dev/null
+++ b/support-agent/package.json
@@ -0,0 +1,18 @@
+{
+  "name": "support-agent",
+  "version": "1.0.0",
+  "main": "index.ts",
+  "keywords": [],
+  "author": "",
+  "license": "ISC",
+  "description": "",
+  "dependencies": {
+    "dotenv": "^16.4.7",
+    "langbase": "^1.1.55",
+    "zod": "^3.25.20",
+    "zod-to-json-schema": "^3.24.5"
+  },
+  "devDependencies": {
+    "@types/node": "^22.15.2"
+  }
+}
diff --git a/support-agent/readme.md
new file mode 100644
index 0000000..1a79cf5
--- /dev/null
+++ b/support-agent/readme.md
@@ -0,0 +1,78 @@
+## Support Agent
+
+Support Agent is an agent in Node.js built with [CHAI](https://chai.new), designed to assist in supporting customers with their queries by analyzing, retrieving, and evaluating relevant documentation to generate comprehensive answers.
+
+## Features
+
+- 🔍 **Query Analysis**
+
+  - **Optimize Retrieval**: Analyzes user queries to create optimized search queries for document retrieval, extracting key concepts and terms.
+
+- 📚 **Information Retrieval**
+
+  - **Document Retrieval**: Retrieves information from documentation based on the optimized search query.
+
+- 📝 **Context Evaluation**
+
+  - **Evaluate Relevance**: Evaluates the quality of retrieved information to determine its relevance to the user's query.
+
+- 💡 **Answer Generation**
+  - **Comprehensive Answers**: Generates comprehensive answers based on the evaluated documentation, ensuring clarity and relevance.
+
+## Get started
+
+Let's get started with the project:
+
+1. To get started with Langbase, you'll need to [create a free personal account on Langbase.com][signup] and verify your email address. Done? Cool, cool!
+
+2. Download and setup the project
+
+   The following command:
+
+   - Downloads the example project folder from [here](https://download-directory.github.io/?url=https://github.com/LangbaseInc/awesome-agents/tree/main/support-agent)
+   - Renames the folder to support-agent
+   - Changes the directory to the project folder
+   - Copies the .env.example file to .env in the project folder
+
+   ```bash
+   npx degit LangbaseInc/awesome-agents/support-agent support-agent &&
+   cd support-agent &&
+   cp .env.example .env
+   ```
+
+3. Add the values of these environment variables to the .env file:
+
+   ```plaintext
+   # Get your org or user API key that can be used to access everything with Langbase.
+   # https://langbase.com/docs/api-reference/api-keys
+   LANGBASE_API_KEY="your-langbase-api-key"
+   ```
+
+4. Install dependencies:
+
+   ```bash
+   pnpm install
+
+   # OR
+   npm install
+   ```
+
+5. Run the support agent:
+
+   ```bash
+   pnpm run start
+
+   # OR
+   npm run start
+   ```
+
+## Project Structure
+
+```
+support-agent/
+├── .env.example    # Environment variables example
+├── .gitignore      # Git ignore
+├── index.ts        # Support agent implementation
+├── package.json    # Node.js package configuration and dependencies
+└── readme.md       # Project documentation
+```
diff --git a/support-agent/tsconfig.json
new file mode 100644
index 0000000..6feaed6
--- /dev/null
+++ b/support-agent/tsconfig.json
@@ -0,0 +1,12 @@
+{
+  "compilerOptions": {
+    "target": "es2016",
+    "module": "commonjs",
+    "rootDir": "./",
+    "outDir": "./dist",
+    "esModuleInterop": true,
+    "forceConsistentCasingInFileNames": true,
+    "strict": true,
+    "skipLibCheck": true
+  }
+}
diff --git a/travel-planner-agent/readme.md
index a94e272..18beb1a 100644
--- a/travel-planner-agent/readme.md
+++ b/travel-planner-agent/readme.md
@@ -1,10 +1,6 @@
 ## Travel Planner Agent
 
-Travel Planner Agent is an agent in Node.js built with Langbase, designed to assist in planning comprehensive travel itineraries based on user requests. It leverages the following core functionalities:
-
-- Travel Orchestration
-- Subtask Management
-- Travel Plan Synthesis
+Travel Planner Agent is an agent in Node.js built with Langbase, designed to assist in planning comprehensive travel itineraries based on user requests.
 
 ## Features
diff --git a/tweet-writer-evaluator-agent/readme.md
index 4358a22..449ceb2 100644
--- a/tweet-writer-evaluator-agent/readme.md
+++ b/tweet-writer-evaluator-agent/readme.md
@@ -1,9 +1,6 @@
 ## Tweet Writer Evaluator Agent
 
-Tweet Writer Evaluator Agent is an agent in Node.js built with Langbase, designed to assist in creating and evaluating tweets based on user tasks. It leverages the following core functionalities:
-
-- Tweet Generation
-- Tweet Evaluation
+Tweet Writer Evaluator Agent is an agent in Node.js built with Langbase, designed to assist in creating and evaluating tweets based on user tasks.
 
 ## Features