3 changes: 3 additions & 0 deletions .gitignore
@@ -1,5 +1,8 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# PRD
PRD

# dependencies
/node_modules
/.pnp
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion src/lib/config/langchain.ts
@@ -1,7 +1,7 @@
import { LangChainServiceConfig } from '@/types/langchain-types';

export const DEFAULT_LANGCHAIN_CONFIG: LangChainServiceConfig = {
modelName: 'gpt-4o',
modelName: 'gpt-5',
temperature: 0.7,
maxTokens: 500,
enableLogging: process.env.NODE_ENV === 'development',
39 changes: 36 additions & 3 deletions src/shared/services/content-generation.service.ts
@@ -1,6 +1,7 @@
import { GeneratedContent, ToneOfVoice } from '@/types';
import { getLangChainMicroblogService } from './langchain.factory';
import { getGitHubModelsService } from './github-models.services';
import { MockContentService } from './mock-content.service';

export interface ContentGenerationStrategy {
generateContent(topic: string, tone: ToneOfVoice, keywords?: string): Promise<GeneratedContent>;
@@ -20,6 +21,12 @@ class GitHubModelsStrategy implements ContentGenerationStrategy {
}
}

class MockStrategy implements ContentGenerationStrategy {
async generateContent(topic: string, tone: ToneOfVoice, keywords?: string): Promise<GeneratedContent> {
return MockContentService.generateMockContent(topic, tone, keywords);
}
}

export class ContentGenerationService {
private static instance: ContentGenerationService;
private strategy: ContentGenerationStrategy;
@@ -42,11 +49,37 @@ export class ContentGenerationService {
try {
return await this.strategy.generateContent(topic, tone, keywords);
} catch (error) {
console.warn('Primary strategy failed:', error);

// Check if it's a rate limit error
const isRateLimit = error instanceof Error &&
(error.message.includes('Rate limit') || error.message.includes('429'));

if (this.useLangChain) {
console.warn('LangChain failed, falling back to GitHub Models:', error);
this.strategy = new GitHubModelsStrategy();
return this.strategy.generateContent(topic, tone, keywords);
console.warn('LangChain failed, trying GitHub Models...');
try {
this.strategy = new GitHubModelsStrategy();
return await this.strategy.generateContent(topic, tone, keywords);
} catch (githubError) {
console.warn('GitHub Models also failed:', githubError);

// If both services fail due to rate limiting, use mock
if (isRateLimit || (githubError instanceof Error && githubError.message.includes('Rate limit'))) {
console.warn('Rate limits exceeded, using mock content for development...');
this.strategy = new MockStrategy();
return await this.strategy.generateContent(topic, tone, keywords);
}
throw githubError;
}
}

// If not using LangChain and GitHub Models fails, use mock for rate limits
if (isRateLimit) {
console.warn('Rate limit exceeded, using mock content for development...');
this.strategy = new MockStrategy();
return await this.strategy.generateContent(topic, tone, keywords);
}

throw error;
}
}
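For readers tracing the new fallback chain, a minimal usage sketch is shown below. It assumes the singleton exposes a `getInstance()` accessor (that part of the class is collapsed in this diff) and that the public `generateContent` method mirrors the `ContentGenerationStrategy` signature; the topic, tone, and keyword values are purely illustrative.

```typescript
import { ContentGenerationService } from '@/shared/services/content-generation.service';
import { ToneOfVoice } from '@/types';

// Sketch only: getInstance() is assumed from the singleton pattern above.
async function demo(): Promise<void> {
  const service = ContentGenerationService.getInstance();

  // One call cascades through the strategies: LangChain first, then GitHub
  // Models, then mock content when both providers report rate limits.
  const content = await service.generateContent(
    'TypeScript tips for Next.js developers',
    'technical' as ToneOfVoice, // literal assumed to be a valid ToneOfVoice
    'typescript, nextjs'
  );

  console.log(content.mainContent);
  console.log(content.hashtags);
  console.log(content.insights);
}

demo().catch(console.error);
```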
4 changes: 2 additions & 2 deletions src/shared/services/github-models.services.ts
@@ -9,7 +9,7 @@ interface ToneGuidelines {
class GitHubModelsService {
private client: OpenAI;
private readonly toneGuidelines: ToneGuidelines;
private readonly modelName: string = "gpt-4o";
private readonly modelName: string = "gpt-5";

constructor() {
this.validateEnvironmentVariables();
@@ -33,7 +33,7 @@ class GitHubModelsService {
const userMessage = this.createUserPrompt(topic, tone, keywords);

const completion = await this.client.chat.completions.create({
model: 'gpt-4o',
model: 'gpt-5',
messages: [
{ role: 'system', content: systemMessage },
{ role: 'user', content: userMessage }
3 changes: 1 addition & 2 deletions src/shared/services/langchain.services.ts
@@ -31,7 +31,7 @@ export class LangChainMicroblogService {
constructor(config?: Partial<LangChainServiceConfig>) {
// Usar configuração padrão se não fornecida
const defaultConfig: LangChainServiceConfig = {
modelName: 'gpt-4o',
modelName: 'gpt-5',
temperature: 0.7,
maxTokens: 500,
enableLogging: process.env.NODE_ENV === 'development',
@@ -168,7 +168,6 @@ export class LangChainMicroblogService {
this.model = new ChatOpenAI({
modelName: this.config.modelName,
temperature: this.config.temperature,
maxTokens: this.config.maxTokens,
openAIApiKey: process.env.NEXT_PUBLIC_GITHUB_MODELS_TOKEN,
configuration: {
baseURL: process.env.NEXT_PUBLIC_GITHUB_MODELS_ENDPOINT,
1 change: 0 additions & 1 deletion src/shared/services/langchain/langchain.client.ts
@@ -43,7 +43,6 @@ export class LangChainClient {
this.model = new ChatOpenAI({
modelName: this.config.modelName,
temperature: this.config.temperature,
maxTokens: this.config.maxTokens,
openAIApiKey: process.env.NEXT_PUBLIC_GITHUB_MODELS_TOKEN,
configuration: {
baseURL: process.env.NEXT_PUBLIC_GITHUB_MODELS_ENDPOINT,
116 changes: 116 additions & 0 deletions src/shared/services/mock-content.service.ts
@@ -0,0 +1,116 @@
import { GeneratedContent, ToneOfVoice } from '@/types';

export class MockContentService {
private static mockContents: Record<string, GeneratedContent[]> = {
technical: [
{
mainContent: "🚀 Exploring the latest in AI technology: GPT models are revolutionizing how we interact with data. The precision and adaptability are game-changing! #TechTrends #AI",
hashtags: ["TechTrends", "AI", "Innovation", "GPT", "MachineLearning"],
insights: [
"AI technology is rapidly advancing with new model capabilities",
"Data interaction patterns are evolving with AI integration",
"Technical precision in AI models improves business outcomes"
]
},
{
mainContent: "⚡ Performance optimization tip: Implementing proper caching strategies can reduce API calls by 80%. Always measure before optimizing! #DevTips #Performance",
hashtags: ["DevTips", "Performance", "Optimization", "Caching", "API"],
insights: [
"Caching significantly reduces server load and response times",
"Measurement is crucial before implementing optimizations",
"API efficiency directly impacts user experience"
]
}
],
casual: [
{
mainContent: "Just discovered an amazing coffee shop in downtown! ☕ The atmosphere is perfect for remote work and their WiFi is lightning fast. Highly recommend! #CoffeeLovers",
hashtags: ["CoffeeLovers", "RemoteWork", "Downtown", "Productivity"],
insights: [
"Good work environments significantly boost productivity",
"Local businesses often provide unique working experiences",
"Fast internet is essential for modern remote work"
]
},
{
mainContent: "Weekend project: Building a small garden in my backyard 🌱 There's something therapeutic about getting your hands dirty and watching things grow! #Gardening",
hashtags: ["Gardening", "WeekendProject", "Nature", "SelfCare"],
insights: [
"Gardening provides mental health benefits and stress relief",
"DIY projects offer sense of accomplishment and creativity",
"Connecting with nature improves overall well-being"
]
}
],
motivational: [
{
mainContent: "💪 Every expert was once a beginner. The key is to keep pushing forward even when progress feels slow. Your breakthrough is closer than you think! #NeverGiveUp",
hashtags: ["NeverGiveUp", "Growth", "Persistence", "Success", "Motivation"],
insights: [
"Expertise develops gradually through consistent practice",
"Progress often feels slow but compounds over time",
"Breakthrough moments come after sustained effort"
]
},
{
mainContent: "🌟 Challenge yourself to learn one new thing today. Growth happens outside your comfort zone. What will you discover? #PersonalGrowth #Learning",
hashtags: ["PersonalGrowth", "Learning", "Challenge", "ComfortZone"],
insights: [
"Daily learning habits accelerate personal development",
"Comfort zones limit potential for growth and discovery",
"Small daily improvements lead to significant long-term results"
]
}
]
};

static async generateMockContent(
topic: string,
tone: ToneOfVoice,
keywords?: string
): Promise<GeneratedContent> {
// Simulate API delay
await new Promise(resolve => setTimeout(resolve, 2000));

const mockData = this.mockContents[tone] || this.mockContents.casual;
const selectedContent = mockData[Math.floor(Math.random() * mockData.length)];

// Customize content based on topic if provided
if (topic && topic.length > 10) {
const customContent = this.customizeContent(selectedContent, topic, keywords);
return customContent;
}

return selectedContent;
}

private static customizeContent(
baseContent: GeneratedContent,
topic: string,
keywords?: string
): GeneratedContent {
const topicWords = topic.toLowerCase().split(' ');
const keywordList = keywords ? keywords.split(',').map(k => k.trim()) : [];

// Create a more relevant main content
const customContent = `💡 ${topic}: This is exactly the kind of topic that sparks innovation and creativity! Let's explore the possibilities together. #Innovation`;

// Mix original hashtags with topic-relevant ones
const relevantHashtags = [...baseContent.hashtags.slice(0, 3)];
if (keywordList.length > 0) {
relevantHashtags.push(...keywordList.slice(0, 2).map(k => k.replace(/\s+/g, '')));
}

const customInsights = [
`The topic "${topic}" offers unique opportunities for exploration`,
`Understanding this subject can lead to valuable insights and growth`,
...baseContent.insights.slice(1)
];

return {
mainContent: customContent.substring(0, 280),
hashtags: relevantHashtags,
insights: customInsights
};
}
}
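The mock service can also be exercised on its own, which is convenient when both remote providers are rate limited during development. A short sketch follows; the sample topic, tone, and keywords are made up, and the `'motivational'` literal is assumed to be a valid `ToneOfVoice`, as the keys of the mock map above suggest.

```typescript
import { MockContentService } from '@/shared/services/mock-content.service';
import { ToneOfVoice } from '@/types';

// Illustrative only: calls the static helper directly, bypassing the strategy
// chain. Expect an artificial ~2s delay (see the setTimeout above).
async function previewMockContent(): Promise<void> {
  const content = await MockContentService.generateMockContent(
    'Building resilient AI fallbacks in Next.js',
    'motivational' as ToneOfVoice,
    'resilience, fallbacks'
  );

  console.log(content.mainContent); // topic-aware text, capped at 280 characters
  console.log(content.hashtags);    // base hashtags plus keyword-derived ones
  console.log(content.insights);    // insights customized around the topic
}

previewMockContent().catch(console.error);
```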
4 changes: 2 additions & 2 deletions workshop/en-us/v1/00-initial.md
@@ -23,7 +23,7 @@ A content generation application called: Microblog AI with Next.js
* In version 1.0

* A modern, responsive interface using Next.js v15 combined with Tailwind CSS
* Content generation with AI using **[GitHub Models (OpenAI GPT-4o)](https://github.com/marketplace?type=models)**
* Content generation with AI using **[GitHub Models (OpenAI GPT-5)](https://github.com/marketplace?type=models)**
* Three different tones of voice: technical, casual, and motivational
* Hashtag optimization and strategic insights, allowing you to share the generated content across various social media platforms

@@ -54,7 +54,7 @@ To follow along with this workshop, you'll need to have the following installed
* Docker (WSL 2 for Windows users)
* Terraform
* GitHub account (to create a temporary GitHub Models key)
* OpenAI account (to use GPT-4o API in production)
* OpenAI account (to use GPT-5 API in production)
* Cloud service account (for app deployment). It can be: Azure, AWS, or Google Cloud—choose what works best for you!

## 🎯 Learning Objectives
4 changes: 2 additions & 2 deletions workshop/en-us/v1/01-introduction.md
@@ -36,7 +36,7 @@ We’ll build the Smart Microblog Generator, a modern web application that inclu

**3. Robust API**

* Integration with GitHub Models (GPT-4o)
* Integration with GitHub Models (GPT-5)
* Rate limiting for protection
* Detailed error handling

@@ -64,7 +64,7 @@ Here’s a preview of what the app looks like:
* **What it is:** Free access to AI models through GitHub
* **Why we use it:**

* Access to cutting-edge models, including GPT-4o, for free
* Access to cutting-edge models, including GPT-5, for free
* Easy integration for personal projects and learning (ideal for PoCs and MVPs)
* No credit card required (for PoC/MVP usage)

4 changes: 2 additions & 2 deletions workshop/en-us/v1/02-configure-environment-gh-models.md
@@ -24,7 +24,7 @@ During the application development, we’ll use GitHub Models—but only during

GitHub Models is a suite of AI development tools integrated into GitHub, designed to make working with AI more accessible, collaborative, and productive. Instead of managing multiple platforms and complex configurations, GitHub Models provides a unified space inside GitHub itself, where you can experiment with, compare, manage, and evaluate AI models at production scale—all within the familiar and secure GitHub workflow.

In the context of this workshop, we’ll use GitHub Models as our AI provider to generate intelligent content for our microblog, connecting our application directly to the free API (such as GPT-4o). This removes the need for credit cards, API costs, or extra infrastructure.
In the context of this workshop, we’ll use GitHub Models as our AI provider to generate intelligent content for our microblog, connecting our application directly to the free API (such as GPT-5). This removes the need for credit cards, API costs, or extra infrastructure.

In the GitHub Marketplace, you can explore the list of available models, their features, and how to integrate them into your projects. It’s a great way to discover what AI can do for your applications.
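If it helps to see where those credentials end up, the services in this repository read two environment variables: `NEXT_PUBLIC_GITHUB_MODELS_TOKEN` and `NEXT_PUBLIC_GITHUB_MODELS_ENDPOINT`. Below is a minimal startup guard you could add; the function name and error wording are illustrative, not part of the workshop code.

```typescript
// Sketch only: checks the two variables the LangChain and GitHub Models
// services in this project read from process.env.
export function assertGitHubModelsEnv(): void {
  const token = process.env.NEXT_PUBLIC_GITHUB_MODELS_TOKEN;
  const endpoint = process.env.NEXT_PUBLIC_GITHUB_MODELS_ENDPOINT;

  if (!token || !endpoint) {
    throw new Error(
      'Missing NEXT_PUBLIC_GITHUB_MODELS_TOKEN and/or NEXT_PUBLIC_GITHUB_MODELS_ENDPOINT. ' +
        'Create a GitHub Models token and add both values to your .env.local before running the app.'
    );
  }
}
```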

@@ -46,7 +46,7 @@ In the GitHub Marketplace, you can explore the list of available models, their f

Our choice of GitHub Models for this project is based on three pillars:

1. **Accessibility**: Anyone with a GitHub account can access advanced models (like GPT-4o) at no cost—no credit card or infrastructure setup needed.
1. **Accessibility**: Anyone with a GitHub account can access advanced models (like GPT-5) at no cost—no credit card or infrastructure setup needed.
2. **Easy Integration**: The platform provides a developer-friendly API that integrates seamlessly into our Next.js backend.
3. **Professional-Grade Experience**: Using GitHub Models mirrors the practices of real-world AI teams—managing, evaluating, and iterating on models and prompts like in production settings.

8 changes: 4 additions & 4 deletions workshop/en-us/v1/05-integration-with-ai.md
@@ -15,7 +15,7 @@ By the end of this session, you will be able to:

## Step 1: Understanding GitHub Models and Initial Setup

**[GitHub Models](https://github.com/marketplace/models-github)** is a platform that provides access to advanced AI models through an OpenAI-compatible API. This means we can use models like GPT-4o in a scalable and reliable way, leveraging GitHub’s infrastructure to host our artificial intelligence.
**[GitHub Models](https://github.com/marketplace/models-github)** is a platform that provides access to advanced AI models through an OpenAI-compatible API. This means we can use models like GPT-5 in a scalable and reliable way, leveraging GitHub’s infrastructure to host our artificial intelligence.

The big advantage of GitHub Models is that it allows us to experiment with different AI models without having to manage our own infrastructure. Additionally, compatibility with the OpenAI API means our code will be easily portable if we decide to switch to other providers in the future. And of course, it’s free to use in open-source projects, which aligns perfectly with our development philosophy.
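To make that compatibility concrete, the standard OpenAI SDK can talk to GitHub Models simply by pointing it at a different base URL. A minimal sketch, assuming the same environment variables used throughout this project hold your token and endpoint:

```typescript
import OpenAI from 'openai';

// Sketch of the connection pattern the GitHubModelsService relies on; the
// environment variables are assumed to be configured as in Session 02.
const client = new OpenAI({
  apiKey: process.env.NEXT_PUBLIC_GITHUB_MODELS_TOKEN,
  baseURL: process.env.NEXT_PUBLIC_GITHUB_MODELS_ENDPOINT,
});

async function smokeTest(): Promise<void> {
  const completion = await client.chat.completions.create({
    model: 'gpt-5', // the model name used throughout this version of the workshop
    messages: [{ role: 'user', content: 'Reply with the single word: ok' }],
  });

  console.log(completion.choices[0]?.message?.content);
}

smokeTest().catch(console.error);
```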

@@ -40,7 +40,7 @@ interface ToneGuidelines {
class GitHubModelsService {
private client: OpenAI;
private readonly toneGuidelines: ToneGuidelines;
private readonly modelName: string = "gpt-4o";
private readonly modelName: string = "gpt-5";

constructor() {
// Validate environment variables at instantiation time
@@ -168,7 +168,7 @@ async generateMicroblogContent(
const userMessage = this.createUserPrompt(topic, tone, keywords);

const completion = await this.client.chat.completions.create({
model: 'gpt-4o',
model: 'gpt-5',
messages: [
{ role: 'system', content: systemMessage },
{ role: 'user', content: userMessage }
@@ -196,7 +196,7 @@ async generateMicroblogContent(

### Understanding the API Parameters

The model configuration is carefully optimized for our application. We use GPT-4o, which is specifically fine-tuned for creative and accurate tasks. A **temperature** of 0.7 provides a perfect balance between creativity and consistency—values closer to 0.0 are more deterministic, while those near 1.0 are highly creative and unpredictable. The 0.7 value is considered a sweet spot for generating focused yet creative content.
The model configuration is carefully optimized for our application. We use GPT-5, which is specifically fine-tuned for creative and accurate tasks. A **temperature** of 0.7 provides a perfect balance between creativity and consistency—values closer to 0.0 are more deterministic, while those near 1.0 are highly creative and unpredictable. The 0.7 value is considered a sweet spot for generating focused yet creative content.

The **max\_completion\_tokens** set to 500 helps limit cost and ensures that responses are concise and to the point. The **response\_format** set to `json_object` forces the model to return structured, parseable output, eliminating the need for additional post-processing.
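Seen together in one request, the parameters discussed above look roughly like this. The sketch assumes a client configured as shown earlier in this session, and the prompt strings are placeholders rather than the workshop's real prompts; if your chosen model only accepts its default temperature, simply omit that field.

```typescript
import OpenAI from 'openai';

// Condensed sketch of the request shape, not a drop-in replacement for
// generateMicroblogContent.
export async function generateSample(
  client: OpenAI,
  systemMessage: string,
  userMessage: string
): Promise<unknown> {
  const completion = await client.chat.completions.create({
    model: 'gpt-5',
    messages: [
      { role: 'system', content: systemMessage }, // persona and formatting rules
      { role: 'user', content: userMessage },     // topic, tone, and keywords
    ],
    temperature: 0.7,                         // balance between creativity and consistency
    max_completion_tokens: 500,               // keeps responses concise and bounds cost
    response_format: { type: 'json_object' }, // forces structured, parseable output
  });

  // Because json_object is enforced, the reply can be parsed without cleanup.
  return JSON.parse(completion.choices[0]?.message?.content ?? '{}');
}
```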

2 changes: 1 addition & 1 deletion workshop/en-us/v1/09-final-v1-app.md
@@ -1,6 +1,6 @@
# Session 09: Conclusion and Next Steps

In this final session, we’ll celebrate the achievements made during version 1.0 of our **Microblog AI with Next.js** project. Throughout this journey, you’ve learned and applied advanced techniques that combine JavaScript/TypeScript, React, Next.js, Tailwind CSS, and Artificial Intelligence using GitHub Models (GPT-4o) to build a complete and sophisticated application.
In this final session, we’ll celebrate the achievements made during version 1.0 of our **Microblog AI with Next.js** project. Throughout this journey, you’ve learned and applied advanced techniques that combine JavaScript/TypeScript, React, Next.js, Tailwind CSS, and Artificial Intelligence using GitHub Models (GPT-5) to build a complete and sophisticated application.

### 🏆 What Have You Learned?

4 changes: 2 additions & 2 deletions workshop/pt-br/v1/00-initial.md
@@ -23,7 +23,7 @@ Uma aplicação de geração de conteúdos que se chama: Microblog AI com Next.j
- Na versão 1.0

- Interface moderna e responsiva utilizando Next.js v15 e em conjunto com o Tailwind CSS
- Geração de conteúdo com IA usando o **[GitHub Models (OpenAI GPT-4o)](https://github.com/marketplace?type=models)**
- Geração de conteúdo com IA usando o **[GitHub Models (OpenAI GPT-5)](https://github.com/marketplace?type=models)**
- Três tons de voz diferentes: técnico, casual e motivacional
- Com otimização de hashtags e insights estratégicos, podendo compartilhar o conteúdo gerado em diferentes plataformas de redes sociais.

@@ -51,7 +51,7 @@ Para acompanhar esse workshop, você precisará ter instalado ou obter os seguin
- Docker (WSL 2 para usuários Windows)
- Terraform
- Conta no GitHub (para criar uma chave temporária do GitHub Models)
- Conta na OpenAI (para usar a API do GPT-4o em produção)
- Conta na OpenAI (para usar a API do GPT-5 em produção)
- Conta no serviço de nuvem (para deployment da aplicação). Pode ser: Azure, AWS ou Google Cloud o que for da sua melhor escolha!

## 🎯 Objetivos de Aprendizado
4 changes: 2 additions & 2 deletions workshop/pt-br/v1/01-introduction.md
@@ -33,7 +33,7 @@ Vamos criar o Smart Microblog Generator, uma aplicação web e moderna que:
- Sistema de cópia com feedback visual

**3. API robusta**
- Integração com GitHub Models (GPT-4o)
- Integração com GitHub Models (GPT-5)
- Rate limiting para proteção
- Tratamento de erros detalhado

@@ -57,7 +57,7 @@ Aqui está uma visão geral de como será a aplicação:
**2. GitHub Models**
- **O que é:** Acesso gratuito a modelos de IA através do GitHub
- **Por que usar:**
- Acesso ao inúmeros modelos de IA de última geração, incluso o modelo GPT-4o sem custo.
- Acesso ao inúmeros modelos de IA de última geração, incluso o modelo GPT-5 sem custo.
- Integração facilitada para projetos pessoais e aprendizado (em estágio de PoCs ou MVPs)
- Sem necessidade de cartão de crédito (em estágio de PoCs ou MVPs)
