diff --git a/.gitignore b/.gitignore index 5ef6a52..6161bc7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,8 @@ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. +# PRD +PRD + # dependencies /node_modules /.pnp diff --git a/package-lock.json b/package-lock.json index 36a33bc..0fce788 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "microblog-ai-nextjs", - "version": "0.1.0", + "version": "2.0.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "microblog-ai-nextjs", - "version": "0.1.0", + "version": "2.0.1", "dependencies": { "@heroicons/react": "^2.2.0", "@langchain/core": "^0.3.62", diff --git a/src/lib/config/langchain.ts b/src/lib/config/langchain.ts index d75f876..a3621fb 100644 --- a/src/lib/config/langchain.ts +++ b/src/lib/config/langchain.ts @@ -1,7 +1,7 @@ import { LangChainServiceConfig } from '@/types/langchain-types'; export const DEFAULT_LANGCHAIN_CONFIG: LangChainServiceConfig = { - modelName: 'gpt-4o', + modelName: 'gpt-5', temperature: 0.7, maxTokens: 500, enableLogging: process.env.NODE_ENV === 'development', diff --git a/src/shared/services/content-generation.service.ts b/src/shared/services/content-generation.service.ts index 61a1d2a..6df0d57 100644 --- a/src/shared/services/content-generation.service.ts +++ b/src/shared/services/content-generation.service.ts @@ -1,6 +1,7 @@ import { GeneratedContent, ToneOfVoice } from '@/types'; import { getLangChainMicroblogService } from './langchain.factory'; import { getGitHubModelsService } from './github-models.services'; +import { MockContentService } from './mock-content.service'; export interface ContentGenerationStrategy { generateContent(topic: string, tone: ToneOfVoice, keywords?: string): Promise; @@ -20,6 +21,12 @@ class GitHubModelsStrategy implements ContentGenerationStrategy { } } +class MockStrategy implements ContentGenerationStrategy { + async generateContent(topic: string, tone: ToneOfVoice, 
keywords?: string): Promise { + return MockContentService.generateMockContent(topic, tone, keywords); + } +} + export class ContentGenerationService { private static instance: ContentGenerationService; private strategy: ContentGenerationStrategy; @@ -42,11 +49,37 @@ export class ContentGenerationService { try { return await this.strategy.generateContent(topic, tone, keywords); } catch (error) { + console.warn('Primary strategy failed:', error); + + // Check if it's a rate limit error + const isRateLimit = error instanceof Error && + (error.message.includes('Rate limit') || error.message.includes('429')); + if (this.useLangChain) { - console.warn('LangChain failed, falling back to GitHub Models:', error); - this.strategy = new GitHubModelsStrategy(); - return this.strategy.generateContent(topic, tone, keywords); + console.warn('LangChain failed, trying GitHub Models...'); + try { + this.strategy = new GitHubModelsStrategy(); + return await this.strategy.generateContent(topic, tone, keywords); + } catch (githubError) { + console.warn('GitHub Models also failed:', githubError); + + // If both services fail due to rate limiting, use mock + if (isRateLimit || (githubError instanceof Error && githubError.message.includes('Rate limit'))) { + console.warn('Rate limits exceeded, using mock content for development...'); + this.strategy = new MockStrategy(); + return await this.strategy.generateContent(topic, tone, keywords); + } + throw githubError; + } + } + + // If not using LangChain and GitHub Models fails, use mock for rate limits + if (isRateLimit) { + console.warn('Rate limit exceeded, using mock content for development...'); + this.strategy = new MockStrategy(); + return await this.strategy.generateContent(topic, tone, keywords); } + throw error; } } diff --git a/src/shared/services/github-models.services.ts b/src/shared/services/github-models.services.ts index ebfade7..02bfed0 100644 --- a/src/shared/services/github-models.services.ts +++ 
b/src/shared/services/github-models.services.ts @@ -9,7 +9,7 @@ interface ToneGuidelines { class GitHubModelsService { private client: OpenAI; private readonly toneGuidelines: ToneGuidelines; - private readonly modelName: string = "gpt-4o"; + private readonly modelName: string = "gpt-5"; constructor() { this.validateEnvironmentVariables(); @@ -33,7 +33,7 @@ class GitHubModelsService { const userMessage = this.createUserPrompt(topic, tone, keywords); const completion = await this.client.chat.completions.create({ - model: 'gpt-4o', + model: 'gpt-5', messages: [ { role: 'system', content: systemMessage }, { role: 'user', content: userMessage } diff --git a/src/shared/services/langchain.services.ts b/src/shared/services/langchain.services.ts index e88ff2c..0b9e22b 100644 --- a/src/shared/services/langchain.services.ts +++ b/src/shared/services/langchain.services.ts @@ -31,7 +31,7 @@ export class LangChainMicroblogService { constructor(config?: Partial) { // Usar configuração padrão se não fornecida const defaultConfig: LangChainServiceConfig = { - modelName: 'gpt-4o', + modelName: 'gpt-5', temperature: 0.7, maxTokens: 500, enableLogging: process.env.NODE_ENV === 'development', @@ -168,7 +168,6 @@ export class LangChainMicroblogService { this.model = new ChatOpenAI({ modelName: this.config.modelName, temperature: this.config.temperature, - maxTokens: this.config.maxTokens, openAIApiKey: process.env.NEXT_PUBLIC_GITHUB_MODELS_TOKEN, configuration: { baseURL: process.env.NEXT_PUBLIC_GITHUB_MODELS_ENDPOINT, diff --git a/src/shared/services/langchain/langchain.client.ts b/src/shared/services/langchain/langchain.client.ts index 93e2f33..6a8e2b4 100644 --- a/src/shared/services/langchain/langchain.client.ts +++ b/src/shared/services/langchain/langchain.client.ts @@ -43,7 +43,6 @@ export class LangChainClient { this.model = new ChatOpenAI({ modelName: this.config.modelName, temperature: this.config.temperature, - maxTokens: this.config.maxTokens, openAIApiKey: 
process.env.NEXT_PUBLIC_GITHUB_MODELS_TOKEN, configuration: { baseURL: process.env.NEXT_PUBLIC_GITHUB_MODELS_ENDPOINT, diff --git a/src/shared/services/mock-content.service.ts b/src/shared/services/mock-content.service.ts new file mode 100644 index 0000000..0bc2ec9 --- /dev/null +++ b/src/shared/services/mock-content.service.ts @@ -0,0 +1,116 @@ +import { GeneratedContent, ToneOfVoice } from '@/types'; + +export class MockContentService { + private static mockContents: Record = { + technical: [ + { + mainContent: "🚀 Exploring the latest in AI technology: GPT models are revolutionizing how we interact with data. The precision and adaptability are game-changing! #TechTrends #AI", + hashtags: ["TechTrends", "AI", "Innovation", "GPT", "MachineLearning"], + insights: [ + "AI technology is rapidly advancing with new model capabilities", + "Data interaction patterns are evolving with AI integration", + "Technical precision in AI models improves business outcomes" + ] + }, + { + mainContent: "⚡ Performance optimization tip: Implementing proper caching strategies can reduce API calls by 80%. Always measure before optimizing! #DevTips #Performance", + hashtags: ["DevTips", "Performance", "Optimization", "Caching", "API"], + insights: [ + "Caching significantly reduces server load and response times", + "Measurement is crucial before implementing optimizations", + "API efficiency directly impacts user experience" + ] + } + ], + casual: [ + { + mainContent: "Just discovered an amazing coffee shop in downtown! ☕ The atmosphere is perfect for remote work and their WiFi is lightning fast. Highly recommend! 
#CoffeeLovers", + hashtags: ["CoffeeLovers", "RemoteWork", "Downtown", "Productivity"], + insights: [ + "Good work environments significantly boost productivity", + "Local businesses often provide unique working experiences", + "Fast internet is essential for modern remote work" + ] + }, + { + mainContent: "Weekend project: Building a small garden in my backyard 🌱 There's something therapeutic about getting your hands dirty and watching things grow! #Gardening", + hashtags: ["Gardening", "WeekendProject", "Nature", "SelfCare"], + insights: [ + "Gardening provides mental health benefits and stress relief", + "DIY projects offer sense of accomplishment and creativity", + "Connecting with nature improves overall well-being" + ] + } + ], + motivational: [ + { + mainContent: "💪 Every expert was once a beginner. The key is to keep pushing forward even when progress feels slow. Your breakthrough is closer than you think! #NeverGiveUp", + hashtags: ["NeverGiveUp", "Growth", "Persistence", "Success", "Motivation"], + insights: [ + "Expertise develops gradually through consistent practice", + "Progress often feels slow but compounds over time", + "Breakthrough moments come after sustained effort" + ] + }, + { + mainContent: "🌟 Challenge yourself to learn one new thing today. Growth happens outside your comfort zone. What will you discover? 
#PersonalGrowth #Learning", + hashtags: ["PersonalGrowth", "Learning", "Challenge", "ComfortZone"], + insights: [ + "Daily learning habits accelerate personal development", + "Comfort zones limit potential for growth and discovery", + "Small daily improvements lead to significant long-term results" + ] + } + ] + }; + + static async generateMockContent( + topic: string, + tone: ToneOfVoice, + keywords?: string + ): Promise { + // Simulate API delay + await new Promise(resolve => setTimeout(resolve, 2000)); + + const mockData = this.mockContents[tone] || this.mockContents.casual; + const selectedContent = mockData[Math.floor(Math.random() * mockData.length)]; + + // Customize content based on topic if provided + if (topic && topic.length > 10) { + const customContent = this.customizeContent(selectedContent, topic, keywords); + return customContent; + } + + return selectedContent; + } + + private static customizeContent( + baseContent: GeneratedContent, + topic: string, + keywords?: string + ): GeneratedContent { + const topicWords = topic.toLowerCase().split(' '); + const keywordList = keywords ? keywords.split(',').map(k => k.trim()) : []; + + // Create a more relevant main content + const customContent = `💡 ${topic}: This is exactly the kind of topic that sparks innovation and creativity! Let's explore the possibilities together. 
#Innovation`; + + // Mix original hashtags with topic-relevant ones + const relevantHashtags = [...baseContent.hashtags.slice(0, 3)]; + if (keywordList.length > 0) { + relevantHashtags.push(...keywordList.slice(0, 2).map(k => k.replace(/\s+/g, ''))); + } + + const customInsights = [ + `The topic "${topic}" offers unique opportunities for exploration`, + `Understanding this subject can lead to valuable insights and growth`, + ...baseContent.insights.slice(1) + ]; + + return { + mainContent: customContent.substring(0, 280), + hashtags: relevantHashtags, + insights: customInsights + }; + } +} diff --git a/workshop/en-us/v1/00-initial.md b/workshop/en-us/v1/00-initial.md index 2f6af16..040fe60 100644 --- a/workshop/en-us/v1/00-initial.md +++ b/workshop/en-us/v1/00-initial.md @@ -23,7 +23,7 @@ A content generation application called: Microblog AI with Next.js * In version 1.0 * A modern, responsive interface using Next.js v15 combined with Tailwind CSS - * Content generation with A.I using **[GitHub Models (OpenAI GPT-4o)](https://github.com/marketplace?type=models)** + * Content generation with A.I using **[GitHub Models (OpenAI GPT-5)](https://github.com/marketplace?type=models)** * Three different tones of voice: technical, casual, and motivational * Hashtag optimization and strategic insights, allowing you to share the generated content across various social media platforms @@ -54,7 +54,7 @@ To follow along with this workshop, you'll need to have the following installed * Docker (WSL 2 for Windows users) * Terraform * GitHub account (to create a temporary GitHub Models key) -* OpenAI account (to use GPT-4o API in production) +* OpenAI account (to use GPT-5 API in production) * Cloud service account (for app deployment). It can be: Azure, AWS, or Google Cloud—choose what works best for you! 
## 🎯 Learning Objectives diff --git a/workshop/en-us/v1/01-introduction.md b/workshop/en-us/v1/01-introduction.md index 92435d2..25fd300 100644 --- a/workshop/en-us/v1/01-introduction.md +++ b/workshop/en-us/v1/01-introduction.md @@ -36,7 +36,7 @@ We’ll build the Smart Microblog Generator, a modern web application that inclu **3. Robust API** -* Integration with GitHub Models (GPT-4o) +* Integration with GitHub Models (GPT-5) * Rate limiting for protection * Detailed error handling @@ -64,7 +64,7 @@ Here’s a preview of what the app looks like: * **What it is:** Free access to AI models through GitHub * **Why we use it:** - * Access to cutting-edge models, including GPT-4o, for free + * Access to cutting-edge models, including GPT-5, for free * Easy integration for personal projects and learning (ideal for PoCs and MVPs) * No credit card required (for PoC/MVP usage) diff --git a/workshop/en-us/v1/02-configure-environment-gh-models.md b/workshop/en-us/v1/02-configure-environment-gh-models.md index 9eeefa1..ab18aa8 100644 --- a/workshop/en-us/v1/02-configure-environment-gh-models.md +++ b/workshop/en-us/v1/02-configure-environment-gh-models.md @@ -24,7 +24,7 @@ During the application development, we’ll use GitHub Models—but only during GitHub Models is a suite of AI development tools integrated into GitHub, designed to make working with AI more accessible, collaborative, and productive. Instead of managing multiple platforms and complex configurations, GitHub Models provides a unified space inside GitHub itself, where you can experiment with, compare, manage, and evaluate AI models at production scale—all within the familiar and secure GitHub workflow. -In the context of this workshop, we’ll use GitHub Models as our AI provider to generate intelligent content for our microblog, connecting our application directly to the free API (such as GPT-4o). This removes the need for credit cards, API costs, or extra infrastructure. 
+In the context of this workshop, we’ll use GitHub Models as our AI provider to generate intelligent content for our microblog, connecting our application directly to the free API (such as GPT-5). This removes the need for credit cards, API costs, or extra infrastructure. In the GitHub Marketplace, you can explore the list of available models, their features, and how to integrate them into your projects. It’s a great way to discover what AI can do for your applications. @@ -46,7 +46,7 @@ In the GitHub Marketplace, you can explore the list of available models, their f Our choice of GitHub Models for this project is based on three pillars: -1. **Accessibility**: Anyone with a GitHub account can access advanced models (like GPT-4o) at no cost—no credit card or infrastructure setup needed. +1. **Accessibility**: Anyone with a GitHub account can access advanced models (like GPT-5) at no cost—no credit card or infrastructure setup needed. 2. **Easy Integration**: The platform provides a developer-friendly API that integrates seamlessly into our Next.js backend. 3. **Professional-Grade Experience**: Using GitHub Models mirrors the practices of real-world AI teams—managing, evaluating, and iterating on models and prompts like in production settings. diff --git a/workshop/en-us/v1/05-integration-with-ai.md b/workshop/en-us/v1/05-integration-with-ai.md index d4c4cb4..e635553 100644 --- a/workshop/en-us/v1/05-integration-with-ai.md +++ b/workshop/en-us/v1/05-integration-with-ai.md @@ -15,7 +15,7 @@ By the end of this session, you will be able to: ## Step 1: Understanding GitHub Models and Initial Setup -**[GitHub Models](https://github.com/marketplace/models-github)** is a platform that provides access to advanced AI models through an OpenAI-compatible API. This means we can use models like GPT-4o in a scalable and reliable way, leveraging GitHub’s infrastructure to host our artificial intelligence. 
+**[GitHub Models](https://github.com/marketplace/models-github)** is a platform that provides access to advanced AI models through an OpenAI-compatible API. This means we can use models like GPT-5 in a scalable and reliable way, leveraging GitHub’s infrastructure to host our artificial intelligence. The big advantage of GitHub Models is that it allows us to experiment with different AI models without having to manage our own infrastructure. Additionally, compatibility with the OpenAI API means our code will be easily portable if we decide to switch to other providers in the future. And of course, it’s free to use in open-source projects, which aligns perfectly with our development philosophy. @@ -40,7 +40,7 @@ interface ToneGuidelines { class GitHubModelsService { private client: OpenAI; private readonly toneGuidelines: ToneGuidelines; - private readonly modelName: string = "gpt-4o"; + private readonly modelName: string = "gpt-5"; constructor() { // Validate environment variables at instantiation time @@ -168,7 +168,7 @@ async generateMicroblogContent( const userMessage = this.createUserPrompt(topic, tone, keywords); const completion = await this.client.chat.completions.create({ - model: 'gpt-4o', + model: 'gpt-5', messages: [ { role: 'system', content: systemMessage }, { role: 'user', content: userMessage } @@ -196,7 +196,7 @@ async generateMicroblogContent( ### Understanding the API Parameters -The model configuration is carefully optimized for our application. We use GPT-4o, which is specifically fine-tuned for creative and accurate tasks. A **temperature** of 0.7 provides a perfect balance between creativity and consistency—values closer to 0.0 are more deterministic, while those near 1.0 are highly creative and unpredictable. The 0.7 value is considered a sweet spot for generating focused yet creative content. +The model configuration is carefully optimized for our application. We use GPT-5, which is specifically fine-tuned for creative and accurate tasks. 
A **temperature** of 0.7 provides a perfect balance between creativity and consistency—values closer to 0.0 are more deterministic, while those near 1.0 are highly creative and unpredictable. The 0.7 value is considered a sweet spot for generating focused yet creative content. The **max\_completion\_tokens** set to 500 helps limit cost and ensures that responses are concise and to the point. The **response\_format** set to `json_object` forces the model to return structured, parseable output, eliminating the need for additional post-processing. diff --git a/workshop/en-us/v1/09-final-v1-app.md b/workshop/en-us/v1/09-final-v1-app.md index 6cc8446..1718e50 100644 --- a/workshop/en-us/v1/09-final-v1-app.md +++ b/workshop/en-us/v1/09-final-v1-app.md @@ -1,6 +1,6 @@ # Session 09: Conclusion and Next Steps -In this final session, we’ll celebrate the achievements made during version 1.0 of our **Microblog AI with Next.js** project. Throughout this journey, you’ve learned and applied advanced techniques that combine JavaScript/TypeScript, React, Next.js, Tailwind CSS, and Artificial Intelligence using GitHub Models (GPT-4o) to build a complete and sophisticated application. +In this final session, we’ll celebrate the achievements made during version 1.0 of our **Microblog AI with Next.js** project. Throughout this journey, you’ve learned and applied advanced techniques that combine JavaScript/TypeScript, React, Next.js, Tailwind CSS, and Artificial Intelligence using GitHub Models (GPT-5) to build a complete and sophisticated application. ### 🏆 What Have You Learned? 
diff --git a/workshop/pt-br/v1/00-initial.md b/workshop/pt-br/v1/00-initial.md index 7120f23..99a2580 100644 --- a/workshop/pt-br/v1/00-initial.md +++ b/workshop/pt-br/v1/00-initial.md @@ -23,7 +23,7 @@ Uma aplicação de geração de conteúdos que se chama: Microblog AI com Next.j - Na versão 1.0 - Interface moderna e responsiva utilizando Next.js v15 e em conjunto com o Tailwind CSS - - Geração de conteúdo com IA usando o **[GitHub Models (OpenAI GPT-4o)](https://github.com/marketplace?type=models)** + - Geração de conteúdo com IA usando o **[GitHub Models (OpenAI GPT-5)](https://github.com/marketplace?type=models)** - Três tons de voz diferentes: técnico, casual e motivacional - Com otimização de hashtags e insights estratégicos, podendo compartilhar o conteúdo gerado em diferentes plataformas de redes sociais. @@ -51,7 +51,7 @@ Para acompanhar esse workshop, você precisará ter instalado ou obter os seguin - Docker (WSL 2 para usuários Windows) - Terraform - Conta no GitHub (para criar uma chave temporária do GitHub Models) -- Conta na OpenAI (para usar a API do GPT-4o em produção) +- Conta na OpenAI (para usar a API do GPT-5 em produção) - Conta no serviço de nuvem (para deployment da aplicação). Pode ser: Azure, AWS ou Google Cloud o que for da sua melhor escolha! ## 🎯 Objetivos de Aprendizado diff --git a/workshop/pt-br/v1/01-introduction.md b/workshop/pt-br/v1/01-introduction.md index 8417d4a..20958c8 100644 --- a/workshop/pt-br/v1/01-introduction.md +++ b/workshop/pt-br/v1/01-introduction.md @@ -33,7 +33,7 @@ Vamos criar o Smart Microblog Generator, uma aplicação web e moderna que: - Sistema de cópia com feedback visual **3. API robusta** - - Integração com GitHub Models (GPT-4o) + - Integração com GitHub Models (GPT-5) - Rate limiting para proteção - Tratamento de erros detalhado @@ -57,7 +57,7 @@ Aqui está uma visão geral de como será a aplicação: **2. 
GitHub Models** - **O que é:** Acesso gratuito a modelos de IA através do GitHub - **Por que usar:** - - Acesso ao inúmeros modelos de IA de última geração, incluso o modelo GPT-4o sem custo. + - Acesso aos inúmeros modelos de IA de última geração, incluindo o modelo GPT-5 sem custo. - Integração facilitada para projetos pessoais e aprendizado (em estágio de PoCs ou MVPs) - Sem necessidade de cartão de crédito (em estágio de PoCs ou MVPs) diff --git a/workshop/pt-br/v1/02-configure-environment-gh-models.md b/workshop/pt-br/v1/02-configure-environment-gh-models.md index 5f2d94f..df86d03 100644 --- a/workshop/pt-br/v1/02-configure-environment-gh-models.md +++ b/workshop/pt-br/v1/02-configure-environment-gh-models.md @@ -24,7 +24,7 @@ No decorrer do desenvolvimento da aplicação utlizaremos o GitHub Models. Mas, O GitHub Models é uma suíte de ferramentas integrada ao GitHub, projetada para tornar o desenvolvimento com IA mais acessível, colaborativo e produtivo. Em vez de lidar com diversas plataformas e configurações complexas, o GitHub Models oferece um espaço único dentro do próprio GitHub, onde você pode experimentar, comparar, gerenciar e avaliar modelos de IA em escala de produção — tudo isso com a segurança e a integração dos fluxos já conhecidos de projetos no GitHub. -No contexto deste workshop, usaremos o GitHub Models como fonte de IA para gerar textos inteligentes no nosso microblog, conectando nossa aplicação diretamente à API gratuita dos modelos (como GPT-4o). Isso elimina a necessidade de cartão de crédito, custos de API ou infraestrutura adicional. +No contexto deste workshop, usaremos o GitHub Models como fonte de IA para gerar textos inteligentes no nosso microblog, conectando nossa aplicação diretamente à API gratuita dos modelos (como GPT-5). Isso elimina a necessidade de cartão de crédito, custos de API ou infraestrutura adicional.
No Marketplace, você pode verificar a lista de modelos disponíveis, suas funcionalidades e como integrá-los em seus projetos. É uma ótima maneira de explorar as possibilidades que a IA pode trazer para suas aplicações. @@ -46,7 +46,7 @@ No Marketplace, você pode verificar a lista de modelos disponíveis, suas funci A escolha do GitHub Models para este projeto se baseia em três pilares: -- 1. **Acessibilidade**: Qualquer pessoa com conta GitHub pode acessar modelos avançados (como GPT-4o) sem custos, sem precisar configurar infraestrutura própria ou fornecer cartão de crédito. +- 1. **Acessibilidade**: Qualquer pessoa com conta GitHub pode acessar modelos avançados (como GPT-5) sem custos, sem precisar configurar infraestrutura própria ou fornecer cartão de crédito. - 2. **Facilidade de Integração**: A plataforma oferece uma API amigável, pronta para conectar diretamente ao nosso backend em Next.js. diff --git a/workshop/pt-br/v1/05-integration-with-ai.md b/workshop/pt-br/v1/05-integration-with-ai.md index 2bec79b..9047fbd 100644 --- a/workshop/pt-br/v1/05-integration-with-ai.md +++ b/workshop/pt-br/v1/05-integration-with-ai.md @@ -14,7 +14,7 @@ Ao final desta sessão, você será capaz de: ## Passo 1: Entendendo o GitHub Models e Configuração Inicial -O **[GitHub Models](https://github.com/marketplace/models-github)** é uma plataforma que oferece acesso a modelos de IA avançados através de uma API compatível com OpenAI. Isso significa que podemos usar modelos como GPT-4o de forma escalável e confiável, aproveitando toda a infraestrutura do GitHub para hospedar nossa inteligência artificial. +O **[GitHub Models](https://github.com/marketplace/models-github)** é uma plataforma que oferece acesso a modelos de IA avançados através de uma API compatível com OpenAI. Isso significa que podemos usar modelos como GPT-5 de forma escalável e confiável, aproveitando toda a infraestrutura do GitHub para hospedar nossa inteligência artificial. 
A grande vantagem do GitHub Models é que ele nos permite experimentar com diferentes modelos de IA sem precisar gerenciar nossa própria infraestrutura. Além disso, a compatibilidade com a API da OpenAI significa que nosso código será facilmente portável caso decidamos migrar para outros provedores no futuro. E, claro, é gratuito para uso em projetos de código aberto, o que se alinha perfeitamente com nossa filosofia de desenvolvimento. @@ -39,7 +39,7 @@ interface ToneGuidelines { class GitHubModelsService { private client: OpenAI; private readonly toneGuidelines: ToneGuidelines; - private readonly modelName: string = "gpt-4o"; + private readonly modelName: string = "gpt-5"; constructor() { // Validação de variáveis de ambiente no momento da instanciação @@ -166,7 +166,7 @@ async generateMicroblogContent( const userMessage = this.createUserPrompt(topic, tone, keywords); const completion = await this.client.chat.completions.create({ - model: 'gpt-4o', + model: 'gpt-5', messages: [ { role: 'system', content: systemMessage }, { role: 'user', content: userMessage } @@ -194,7 +194,7 @@ async generateMicroblogContent( ### Compreendendo os parâmetros da API -A configuração do modelo é cuidadosamente otimizada para nossa aplicação. Utilizamos o GPT-4o que é especificamente otimizado para tarefas criativas e precisas. A **temperature** de 0.7 representa um equilíbrio perfeito entre criatividade e consistência - valores próximos de 0.0 são muito determinísticos, enquanto valores próximos de 1.0 são muito criativos e aleatórios. O valor 0.7 é considerado o "sweet spot" para conteúdo criativo mas ainda focado. +A configuração do modelo é cuidadosamente otimizada para nossa aplicação. Utilizamos o GPT-5 que é especificamente otimizado para tarefas criativas e precisas. A **temperature** de 0.7 representa um equilíbrio perfeito entre criatividade e consistência - valores próximos de 0.0 são muito determinísticos, enquanto valores próximos de 1.0 são muito criativos e aleatórios. 
O valor 0.7 é considerado o "sweet spot" para conteúdo criativo mas ainda focado. O **max_completion_tokens** de 500 tokens não apenas limita nossos custos, mas também garante que as respostas sejam concisas e diretas ao ponto. O parâmetro **response_format** configurado como 'json_object' força a IA a retornar uma resposta estruturada e parseável, eliminando a necessidade de processamento adicional de texto. diff --git a/workshop/pt-br/v1/09-final-v1-app.md b/workshop/pt-br/v1/09-final-v1-app.md index 4096033..d28444d 100644 --- a/workshop/pt-br/v1/09-final-v1-app.md +++ b/workshop/pt-br/v1/09-final-v1-app.md @@ -1,6 +1,6 @@ ## Sessão 09: Conclusão v1 e Próximos Passos -Nesta sessão final, celebraremos as conquistas obtidas durante a versão 1.0 do nosso projeto **Microblog AI com Next.js**. Ao longo dessa jornada, você aprendeu e aplicou técnicas avançadas que unem JavaScript/TypeScript, React, Next.js, Tailwind CSS e Inteligência Artificial com o GitHub Models (GPT-4o), para criar uma aplicação completa e sofisticada. +Nesta sessão final, celebraremos as conquistas obtidas durante a versão 1.0 do nosso projeto **Microblog AI com Next.js**. Ao longo dessa jornada, você aprendeu e aplicou técnicas avançadas que unem JavaScript/TypeScript, React, Next.js, Tailwind CSS e Inteligência Artificial com o GitHub Models (GPT-5), para criar uma aplicação completa e sofisticada. ### 🏆 O que você aprendeu?