diff --git a/assets/css/index.css b/assets/css/index.css
index f91f8f6f3d..a9cd1497f3 100644
--- a/assets/css/index.css
+++ b/assets/css/index.css
@@ -1046,4 +1046,490 @@ a[href*="#no-click"], img[src*="#no-click"] {
.copy-button:hover {
background-color: #ddd;
-}
\ No newline at end of file
+}
+
+/* Redis AI Agent Builder Styles */
+
+.agent-builder-container {
+ @apply max-w-4xl mx-auto p-6 bg-white rounded-lg border border-redis-pen-800 shadow-lg;
+}
+
+.agent-builder-header {
+ @apply text-center mb-8;
+}
+
+/* Chat Interface Styles */
+.chat-container {
+ @apply bg-gray-50 rounded-lg border border-redis-pen-300 overflow-hidden;
+ min-height: 400px;
+ display: flex;
+ flex-direction: column;
+}
+
+.chat-messages {
+ @apply flex-1 p-4 space-y-4 overflow-y-auto;
+ max-height: 500px;
+}
+
+.chat-message {
+ @apply flex items-start;
+}
+
+.bot-message {
+ @apply justify-start;
+}
+
+.bot-message .message-avatar {
+ @apply mr-3;
+}
+
+.bot-message .message-content {
+ @apply bg-white border border-redis-pen-300 text-redis-ink-900;
+}
+
+.user-message {
+ @apply justify-end;
+}
+
+.user-message .message-avatar {
+ @apply order-2 ml-3 bg-blue-100;
+}
+
+.user-message .message-content {
+ @apply order-1 bg-blue-500 text-white;
+}
+
+.message-avatar {
+ @apply w-8 h-8 rounded-full bg-red-100 flex items-center justify-center flex-shrink-0;
+}
+
+.message-content {
+ @apply max-w-xs lg:max-w-md px-4 py-2 rounded-lg;
+}
+
+.suggestion-chips {
+ @apply flex flex-wrap gap-2 mt-3;
+}
+
+.suggestion-chip {
+ @apply inline-flex items-center px-3 py-1 rounded-full text-sm bg-red-50 text-redis-red-700 border border-red-200;
+ @apply hover:bg-red-100 focus:outline-none focus:ring-2 focus:ring-redis-red-500 focus:ring-offset-1;
+ @apply transition-colors duration-200 cursor-pointer;
+}
+
+.suggestion-chip:hover {
+ @apply bg-red-100 border-red-300;
+}
+
+.chat-input-container {
+ @apply relative border-t border-redis-pen-300 bg-white;
+}
+
+.chat-input-wrapper {
+ @apply flex items-center p-4 space-x-3;
+}
+
+.chat-input {
+ @apply flex-1 px-4 py-2 border border-redis-pen-300 rounded-lg;
+ @apply focus:outline-none focus:ring-2 focus:ring-redis-red-500 focus:border-redis-red-500;
+ @apply transition-colors duration-200;
+}
+
+.send-button {
+ @apply p-2 rounded-lg bg-redis-red-500 text-white;
+ @apply hover:bg-redis-red-600 focus:outline-none focus:ring-2 focus:ring-redis-red-500 focus:ring-offset-2;
+ @apply disabled:opacity-50 disabled:cursor-not-allowed disabled:hover:bg-redis-red-500;
+ @apply transition-colors duration-200;
+}
+
+.suggestions-dropdown {
+ @apply absolute bottom-full left-4 right-4 mb-2 bg-white border border-redis-pen-300 rounded-lg shadow-lg z-10;
+ @apply max-h-48 overflow-y-auto;
+}
+
+.suggestions-list {
+ @apply py-2;
+}
+
+.suggestion-item {
+ @apply px-4 py-2 text-sm text-redis-ink-900 hover:bg-red-50 cursor-pointer;
+ @apply flex items-center space-x-2;
+}
+
+.suggestion-item:hover {
+ @apply bg-red-50;
+}
+
+.suggestion-item.highlighted {
+ @apply bg-red-100;
+}
+
+/* Responsive Design for Chat Interface */
+@media (max-width: 768px) {
+ .chat-container {
+ min-height: 350px;
+ }
+
+ .chat-messages {
+ max-height: 400px;
+ }
+
+ .message-content {
+ @apply max-w-xs;
+ }
+
+ .suggestion-chips {
+ @apply flex-col space-y-2;
+ }
+
+ .suggestion-chip {
+ @apply w-full justify-center;
+ }
+
+ .chat-input-wrapper {
+ @apply flex-col space-y-3 space-x-0;
+ }
+
+ .chat-input {
+ @apply w-full;
+ }
+
+ .send-button {
+ @apply w-full;
+ }
+}
+
+/* Form Groups */
+.form-group {
+ @apply space-y-2;
+}
+
+.form-label {
+ @apply block text-sm font-medium text-redis-ink-900;
+}
+
+.label-text {
+ @apply mr-1;
+}
+
+.label-required {
+ @apply text-redis-red-500;
+}
+
+/* Form Controls */
+.form-select,
+.form-input {
+ @apply w-full px-3 py-2 border border-redis-pen-700 rounded-md shadow-sm;
+ @apply focus:outline-none focus:ring-2 focus:ring-redis-red-500 focus:border-redis-red-500;
+ @apply transition-colors duration-200;
+}
+
+.form-select {
+ @apply bg-white cursor-pointer;
+ background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%236b7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e");
+ background-position: right 0.75rem center;
+ background-repeat: no-repeat;
+ background-size: 1.5em 1.5em;
+ padding-right: 2.5rem;
+}
+
+.form-input:focus,
+.form-select:focus {
+ @apply border-redis-red-500 ring-redis-red-500;
+}
+
+.form-help-text {
+ @apply text-sm text-redis-pen-600 mt-1;
+}
+
+/* Form Actions */
+.form-actions {
+ @apply flex justify-center pt-4;
+}
+
+.generate-btn {
+ @apply inline-flex items-center px-6 py-3 border border-transparent text-base font-medium rounded-md;
+ @apply text-white bg-redis-red-500 hover:bg-redis-red-600 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-redis-red-500;
+ @apply transition-colors duration-200 disabled:opacity-50 disabled:cursor-not-allowed;
+}
+
+.generate-btn:disabled {
+ @apply bg-redis-pen-600 hover:bg-redis-pen-600;
+}
+
+.btn-loading {
+ @apply flex items-center;
+}
+
+/* Code Output Section */
+.code-output-section {
+ @apply mt-8 border-t border-redis-pen-700 pt-8;
+}
+
+.code-header {
+ @apply mb-4;
+}
+
+.code-actions {
+ @apply flex flex-wrap gap-2;
+}
+
+.copy-btn,
+.download-btn {
+ @apply inline-flex items-center px-3 py-2 border border-redis-pen-700 text-sm font-medium rounded-md;
+ @apply text-redis-ink-900 bg-white hover:bg-redis-pen-200 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-redis-red-500;
+ @apply transition-colors duration-200;
+}
+
+.jupyter-btn {
+ @apply inline-flex items-center px-3 py-2 border border-redis-red-500 text-sm font-medium rounded-md;
+ @apply text-white bg-redis-red-500 hover:bg-redis-red-600 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-redis-red-500;
+ @apply transition-colors duration-200;
+}
+
+.copy-btn:hover,
+.download-btn:hover {
+ @apply border-redis-red-500 text-redis-red-600;
+}
+
+.jupyter-btn:hover {
+ @apply bg-redis-red-600 border-redis-red-600;
+}
+
+
+
+.start-again-btn {
+ @apply inline-flex items-center px-3 py-2 border border-orange-500 text-sm font-medium rounded-md;
+ @apply text-orange-700 bg-white hover:bg-orange-50 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-orange-500;
+ @apply transition-colors duration-200;
+}
+
+.start-again-btn:hover {
+ @apply border-orange-600 text-orange-800;
+}
+
+/* Code Container */
+.code-container {
+ @apply relative;
+}
+
+.code-block {
+ @apply bg-slate-900 text-white p-4 rounded-lg overflow-x-auto;
+ @apply font-mono text-sm leading-relaxed;
+ max-height: 500px;
+ overflow-y: auto;
+}
+
+.code-block code {
+ @apply bg-transparent text-inherit;
+}
+
+.code-footer {
+ @apply mt-4 p-4 bg-redis-pen-200 rounded-lg;
+}
+
+/* Responsive Design */
+@media (max-width: 768px) {
+ .agent-builder-container {
+ @apply p-4 mx-4;
+ }
+
+ .code-header {
+ @apply flex-col items-start space-y-2;
+ }
+
+ .code-actions {
+ @apply w-full justify-start flex-wrap;
+ }
+
+ .copy-btn,
+ .download-btn,
+ .jupyter-btn,
+ .start-again-btn {
+ @apply flex-1 justify-center;
+ }
+}
+
+/* Loading Animation */
+@keyframes spin {
+ from {
+ transform: rotate(0deg);
+ }
+ to {
+ transform: rotate(360deg);
+ }
+}
+
+.animate-spin {
+ animation: spin 1s linear infinite;
+}
+
+/* Focus States */
+.form-select:focus,
+.form-input:focus,
+.generate-btn:focus,
+.copy-btn:focus,
+.download-btn:focus,
+.jupyter-btn:focus {
+ @apply outline-none ring-2 ring-offset-2 ring-redis-red-500;
+}
+
+.start-again-btn:focus {
+ @apply outline-none ring-2 ring-offset-2 ring-orange-500;
+}
+
+/* Validation States */
+.form-select:invalid,
+.form-input:invalid {
+ @apply border-red-300;
+}
+
+.form-select:valid,
+.form-input:valid {
+ @apply border-green-300;
+}
+
+/* Hover Effects */
+.form-select:hover,
+.form-input:hover {
+ @apply border-redis-pen-600;
+}
+
+/* Disabled States */
+.form-select:disabled,
+.form-input:disabled {
+ @apply bg-redis-pen-200 cursor-not-allowed opacity-60;
+}
+
+/* Success States */
+.success-message {
+ @apply text-green-600 text-sm mt-2;
+}
+
+.error-message {
+ @apply text-red-600 text-sm mt-2;
+}
+
+/* Code Syntax Highlighting Enhancement */
+.code-block {
+ scrollbar-width: thin;
+ scrollbar-color: #4a5568 #2d3748;
+}
+
+.code-block::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+}
+
+.code-block::-webkit-scrollbar-track {
+ background: #2d3748;
+}
+
+.code-block::-webkit-scrollbar-thumb {
+ background: #4a5568;
+ border-radius: 4px;
+}
+
+.code-block::-webkit-scrollbar-thumb:hover {
+ background: #718096;
+}
+
+
+/* Markdown Rendering Styles for Chat Messages */
+.chat-message .message-content h1,
+.chat-message .message-content h2,
+.chat-message .message-content h3,
+.chat-message .message-content h4,
+.chat-message .message-content h5,
+.chat-message .message-content h6 {
+ @apply font-semibold text-redis-ink-900 mt-4 mb-2;
+}
+
+.chat-message .message-content h1 { @apply text-xl; }
+.chat-message .message-content h2 { @apply text-lg; }
+.chat-message .message-content h3 { @apply text-base; }
+
+.chat-message .message-content p {
+ @apply mb-3 last:mb-0;
+}
+
+.chat-message .message-content ul,
+.chat-message .message-content ol {
+ @apply mb-3 pl-6;
+}
+
+.chat-message .message-content ul {
+ @apply list-disc;
+}
+
+.chat-message .message-content ol {
+ @apply list-decimal;
+}
+
+.chat-message .message-content li {
+ @apply mb-1;
+}
+
+.chat-message .message-content code {
+ @apply bg-redis-pen-200 text-redis-ink-900 px-1 py-0.5 rounded text-sm font-mono;
+}
+
+.chat-message .message-content pre {
+ @apply bg-redis-pen-200 text-redis-ink-900 p-3 rounded-lg mb-3 overflow-x-auto;
+}
+
+.chat-message .message-content pre code {
+ @apply bg-transparent p-0;
+}
+
+.chat-message .message-content blockquote {
+ @apply border-l-4 border-redis-pen-700 pl-4 italic text-redis-pen-600 mb-3;
+}
+
+.chat-message .message-content strong {
+ @apply font-semibold;
+}
+
+.chat-message .message-content em {
+ @apply italic;
+}
+
+.chat-message .message-content a {
+ @apply text-redis-red-600 hover:text-redis-red-700 underline;
+}
+
+.chat-message .message-content table {
+ @apply w-full border-collapse border border-redis-pen-300 mb-3;
+}
+
+.chat-message .message-content th,
+.chat-message .message-content td {
+ @apply border border-redis-pen-300 px-3 py-2 text-left;
+}
+
+.chat-message .message-content th {
+ @apply bg-redis-pen-200 font-semibold;
+}
+
+/* Utility Classes */
+.agent-builder-hidden {
+ display: none !important;
+}
+
+.agent-builder-inactive {
+ opacity: 0.5 !important;
+ pointer-events: none !important;
+ cursor: not-allowed !important;
+}
+
+.sr-only {
+ position: absolute;
+ width: 1px;
+ height: 1px;
+ padding: 0;
+ margin: -1px;
+ overflow: hidden;
+ clip: rect(0, 0, 0, 0);
+ white-space: nowrap;
+ border: 0;
+}
diff --git a/content/develop/ai/agent-builder/_index.md b/content/develop/ai/agent-builder/_index.md
new file mode 100644
index 0000000000..8d4859de79
--- /dev/null
+++ b/content/develop/ai/agent-builder/_index.md
@@ -0,0 +1,74 @@
+---
+Title: Redis AI Agent Builder
+alwaysopen: false
+categories:
+- docs
+- develop
+- ai
+description: Build custom AI agents powered by Redis with our interactive code generator
+linkTitle: Agent Builder
+hideListLinks: true
+weight: 50
+---
+
+
+Agents use Redis for data storage, vector search, and conversation memory. The interactive builder generates production-ready code in your preferred programming language.
+
+## Get started
+
+Use the interactive builder below to generate your custom AI agent code:
+
+{{< agent-builder >}}
+
+## What are AI agents?
+
+AI agents are intelligent systems that can plan, remember, and take actions to help users accomplish goals. Unlike simple chatbots, agents can remember conversations, plan multi-step tasks, use external tools, and learn from interactions.
+
+Redis powers these capabilities with fast, reliable data storage and retrieval that keeps your agents responsive and intelligent.
+
+**Learn more**: [How agents work](agent-concepts/)
+
+## What you can build
+
+Choose from two types of intelligent agents:
+
+- **Recommendation engines**: Personalized product and content recommendations
+- **Conversational assistants**: Chatbots with memory and context awareness
+
+The agent builder will generate complete, working code examples for your chosen agent type.
+
+## Features
+
+- **Multiple programming languages**: Generate code in Python, with JavaScript (Node.js), Java, and C# coming soon
+- **LLM integration**: Support for OpenAI, Anthropic Claude, and Llama 3
+- **Redis optimized**: Uses Redis data structures for optimal performance
+
+## After you generate your code
+
+Use the **Copy** or **Download** buttons and follow the steps below to test it locally:
+
+1. **Set up your environment**: Install Redis and the required dependencies
+2. **Configure your API keys**: Add your LLM provider credentials to environment variables
+3. **Test locally**: Start with simple conversations to verify everything works
+4. **Deploy and scale**: Use Redis Cloud for production deployments
+
+The generated code includes detailed setup instructions and best practices to get you started quickly. If you want to generate another agent, select the **Start again** button.
+
+## Learn more
+
+### AI agent resources
+
+- [How agents work](agent-concepts/) - Understand how agents plan, remember, and act, and why Redis is well suited to power them
+
+### Redis AI documentation
+
+- [Redis Vector Search](/develop/interact/search-and-query/vector-search/) - Semantic search capabilities
+- [Redis Streams](/develop/data-types/streams/) - Real-time data and conversation history
+- [AI Notebooks Collection](/develop/ai/notebook-collection/) - Interactive tutorials and examples
+- [Ecosystem Integrations](/develop/ai/ecosystem-integrations/) - Redis with AI frameworks
+
+### Community and support
+
+- Join the [Redis Discord](https://discord.gg/redis) for community support
+- Explore [Redis AI Resources on GitHub](https://github.com/redis-developer/redis-ai-resources)
+- Watch [AI Video Collection](/develop/ai/ai-videos/) for tutorials and demonstrations
diff --git a/content/develop/ai/agent-builder/agent-concepts.md b/content/develop/ai/agent-builder/agent-concepts.md
new file mode 100644
index 0000000000..bd93cd72c8
--- /dev/null
+++ b/content/develop/ai/agent-builder/agent-concepts.md
@@ -0,0 +1,376 @@
+---
+Title: How agents work
+alwaysopen: false
+categories:
+- docs
+- develop
+- ai
+description: Learn how AI agents work and why Redis is the perfect foundation for building intelligent systems
+linkTitle: How agents work
+weight: 10
+---
+
+## How AI agents work
+
+AI agents are autonomous systems that go far beyond simple chatbots. They combine large language models (LLMs) with external tools, memory, and planning capabilities to accomplish complex tasks.
+
+**Key differences from chatbots:**
+- Maintain state across multiple conversations
+- Reason through problems step-by-step
+- Take actions in the real world
+- Learn and adapt from interactions
+
+### Core agent architecture
+
+{{< image filename="/images/ai_agent/ai-agent-architecture-diagram.svg" alt="AI agent architecture" >}}
+
+### The agent processing cycle
+
+Every user interaction follows a 6-step cycle that makes agents intelligent:
+
+{{< image filename="/images/ai_agent/simple-processing-cycle.svg" alt="AI agent processing cycle" >}}
+
+Why this cycle matters:
+- Maintains context across multiple conversations
+- Learns from experience to improve future responses
+- Handles complex tasks that require multiple steps
+- Recovers from failures and adapts plans in real-time
+
+> Example: When you ask "Book me a flight to Paris and find a hotel," the agent breaks this into separate tasks, remembers your travel preferences, searches for options, and coordinates the booking process.
+
+## Why Redis powers AI agents
+
+Redis is the **ideal foundation** for AI agents because it excels at the three things agents need most: **speed**, **memory**, and **search**.
+
+### Redis powers every part of your agent
+
+
+- **Planner**: Stores workflow templates and agent plans as Hashes or JSON. Enables complex multi-step reasoning.
+- **Retriever**: Vector Search finds semantically similar documents instantly. Supports hybrid search for better results.
+- **Executor**: Stores conversation history, user preferences, and intermediate results. Maintains state across workflows.
+
+### Key advantages
+
+**Ultra-fast response times**
+- Sub-millisecond data access keeps conversations flowing naturally
+- In-memory processing eliminates disk I/O bottlenecks
+- Optimized data structures for different use cases
+
+**Built-in vector search**
+- Native vector indexing with HNSW, FLAT, and SVS-VAMANA algorithms
+- SVS-VAMANA leverages Intel hardware acceleration for enhanced performance
+- Hybrid search combining vector similarity with metadata filtering
+- Real-time updates without index rebuilds
+- [Learn more about Redis Vector Search →](/develop/interact/search-and-query/advanced-concepts/vectors/)
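+
+As an illustration only (the generated recommendation template uses RedisVL's `FilterQuery`; the sketch below uses `VectorQuery` against a hypothetical `docs` index with a 4-dimensional `embedding` field), semantic retrieval with RedisVL looks roughly like this:
+
+```python
+import numpy as np
+import redis
+from redisvl.index import SearchIndex
+from redisvl.query import VectorQuery
+from redisvl.schema import IndexSchema
+
+# Hypothetical index: hash storage with an HNSW vector field
+schema = IndexSchema.from_dict({
+    "index": {"name": "docs", "prefix": "doc"},
+    "fields": [
+        {"name": "title", "type": "text"},
+        {"name": "embedding", "type": "vector",
+         "attrs": {"dims": 4, "algorithm": "hnsw",
+                   "distance_metric": "cosine", "datatype": "float32"}},
+    ],
+})
+index = SearchIndex(schema, redis_client=redis.Redis())
+index.create(overwrite=True)
+index.load([{"title": "Paris travel guide",
+             "embedding": np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32).tobytes()}])
+
+# Find the documents whose embeddings are closest to a query embedding
+query = VectorQuery(vector=[0.1, 0.2, 0.3, 0.4], vector_field_name="embedding",
+                    return_fields=["title"], num_results=3)
+print(index.query(query))
+```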
+
+**Agent memory**
+- **Short-term**: Conversation context and session state
+- **Long-term**: User preferences and learned patterns
+- Flexible data structures (Hashes, Lists, Streams, JSON) for different memory types
+- [Explore Redis data structures →](/develop/data-types/)
+
+## Types of agents you can build
+
+### Conversational assistants
+
+Build chatbots and virtual assistants that:
+- Maintain natural conversations with context and memory
+- Handle multiple topics within a single conversation
+- Provide personalized responses based on user history
+- Escalate to human agents when needed
+
+[Build a conversational agent →](../)
+
+### Recommendation engines
+
+Create intelligent recommendation systems that:
+- Learn from user behavior and preferences
+- Provide real-time personalized suggestions
+- Handle both explicit feedback (ratings) and implicit signals (clicks, time spent)
+- Scale to millions of users and items
+
+[Build a recommendation agent →](../)
+
+### Task automation agents
+
+Automate complex workflows and business processes:
+- Execute multi-step tasks with decision-making
+- Integrate with APIs and external systems
+- Handle error recovery and retry logic
+- Monitor and report on task completion
+
+### Data analysis agents
+
+Process and analyze large datasets intelligently:
+- Perform statistical analysis and pattern recognition
+- Generate insights and reports automatically
+- Handle real-time data streams
+- Create visualizations and dashboards
+
+### Content generation agents
+
+Create and manage content at scale:
+- Generate articles, summaries, and documentation
+- Adapt content for different audiences and formats
+- Maintain brand voice and style consistency
+- Handle content moderation and quality control
+
+### Customer support agents
+
+Provide intelligent customer service:
+- Answer questions using knowledge bases
+- Route complex issues to human agents
+- Track customer satisfaction and feedback
+- Learn from interactions to improve responses
+
+### Research and retrieval agents
+
+Find and synthesize information from multiple sources:
+- Search across documents, databases, and web content
+- Summarize findings and extract key insights
+- Fact-check and verify information accuracy
+- Maintain up-to-date knowledge repositories
+
+### Monitoring and alerting agents
+
+Watch systems and notify when action is needed:
+- Monitor application performance and health
+- Detect anomalies and security threats
+- Send intelligent alerts with context
+- Suggest remediation actions
+
+### Personal productivity agents
+
+Help users manage tasks and information:
+- Schedule meetings and manage calendars
+- Organize and prioritize tasks
+- Provide reminders and follow-ups
+- Learn user preferences and habits
+
+### Trading and financial agents
+
+Make intelligent financial decisions:
+- Analyze market data and trends
+- Execute trades based on predefined strategies
+- Manage risk and portfolio optimization
+- Generate financial reports and insights
+
+## Agent architecture patterns
+
+### Single-agent systems
+
+Simple agents that handle all tasks within one system:
+- Easier to develop and maintain
+- Good for focused use cases
+- All logic contained in one place
+- Suitable for most applications
+
+### Multi-agent systems
+
+Multiple specialized agents working together:
+- Each agent handles specific domains or tasks
+- Agents can communicate and coordinate
+- More complex but more scalable
+- Good for enterprise applications
+
+### Hierarchical agents
+
+Agents organized in layers with different responsibilities:
+- High-level agents handle planning and coordination
+- Low-level agents execute specific tasks
+- Clear separation of concerns
+- Easier to debug and maintain
+
+
+## Redis data structures for agent memory
+
+Understanding how to map agent memory needs to Redis data structures is crucial for building efficient agents:
+
+### Redis streams for conversation history
+
+- Use case: Ordered conversation logs with timestamps and metadata
+- Key benefits: Automatic ordering, range queries, consumer groups, guaranteed delivery
+- Implementation: Store user/agent message pairs with rich contextual metadata
+- Retention: Use XTRIM for automatic cleanup based on age or count limits
+- [Learn about Redis Streams →](/develop/data-types/streams/)
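+
+As a quick illustration (not part of the generated templates), here is a minimal redis-py sketch of this pattern; the `agent:session:42` key name is hypothetical:
+
+```python
+import redis
+
+r = redis.Redis(decode_responses=True)
+stream = "agent:session:42"  # hypothetical per-session stream key
+
+# Append one user/assistant exchange; MAXLEN keeps the log bounded (approximate trim)
+r.xadd(stream, {"role": "user", "content": "Book me a flight to Paris"},
+       maxlen=1000, approximate=True)
+r.xadd(stream, {"role": "assistant", "content": "Sure - which dates work for you?"},
+       maxlen=1000, approximate=True)
+
+# Replay recent turns in order (stream IDs are auto-generated and time-ordered)
+for entry_id, fields in r.xrange(stream, min="-", max="+", count=10):
+    print(entry_id, fields["role"], fields["content"])
+```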
+
+### Redis hashes for user profiles
+
+- Use case: Structured user data with frequent partial updates and atomic operations
+- Key benefits: Memory efficient field-level operations, atomic updates, O(1) field access
+- Implementation: Multi-layered profile system with preferences, behavior patterns, and learned data
+- Scaling: Hash tags for cluster distribution, field expiration for data lifecycle management
+- [Learn about Redis Hashes →](/develop/data-types/hashes/)
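+
+A minimal redis-py sketch of a profile hash (the key and field names are illustrative):
+
+```python
+import redis
+
+r = redis.Redis(decode_responses=True)
+profile_key = "agent:user:1001:profile"  # hypothetical per-user profile key
+
+# Create the profile, then update individual fields without rewriting the whole object
+r.hset(profile_key, mapping={"name": "Ada", "preferred_genre": "sci-fi", "tone": "concise"})
+r.hincrby(profile_key, "sessions", 1)           # O(1) counter update
+r.hset(profile_key, "preferred_genre", "noir")  # partial update; other fields untouched
+
+print(r.hgetall(profile_key))
+```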
+
+### Redis JSON for complex state
+
+- Use case: Nested data structures, complex agent workflows, hierarchical configurations
+- Key benefits: JSONPath queries, atomic nested updates, schema validation, efficient storage
+- Implementation: Multi-step task orchestration, complex decision trees, dynamic configurations
+- Querying: Advanced JSONPath expressions for complex data retrieval and manipulation
+- [Learn about Redis JSON →](/develop/data-types/json/)
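+
+A minimal sketch using redis-py's JSON commands, assuming a Redis deployment that includes the JSON data type; the workflow document shown is hypothetical:
+
+```python
+import redis
+
+r = redis.Redis(decode_responses=True)
+state_key = "agent:task:7:state"  # hypothetical workflow-state document
+
+# Store a nested workflow document, then update one nested field atomically
+r.json().set(state_key, "$", {
+    "goal": "plan a trip to Paris",
+    "steps": [
+        {"name": "find_flights", "status": "pending"},
+        {"name": "find_hotel", "status": "pending"},
+    ],
+})
+r.json().set(state_key, "$.steps[0].status", "done")
+
+# JSONPath query for just the step statuses
+print(r.json().get(state_key, "$.steps[*].status"))
+```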
+
+### Redis sets for relationships and tags
+
+- Use case: Entity relationships, user interest tracking, session management, feature flags
+- Key benefits: O(1) membership testing, efficient set operations, automatic deduplication
+- Implementation: Complex relationship modeling, real-time recommendation engines, access control
+- Operations: Union, intersection, difference for advanced analytics and personalization
+- [Learn about Redis Sets →](/develop/data-types/sets/)
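+
+A minimal redis-py sketch of interest tracking with sets (keys and members are illustrative):
+
+```python
+import redis
+
+r = redis.Redis(decode_responses=True)
+
+# Hypothetical interest sets for two users (duplicates are ignored automatically)
+r.sadd("agent:user:1001:interests", "sci-fi", "space", "robotics")
+r.sadd("agent:user:2002:interests", "robotics", "cooking")
+
+# O(1) membership test and set algebra for personalization signals
+print(r.sismember("agent:user:1001:interests", "space"))
+print(r.sinter("agent:user:1001:interests", "agent:user:2002:interests"))  # shared interests
+```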
+
+### Redis vector sets for semantic search
+
+- Use case: Embedding storage, similarity search, semantic retrieval, content recommendations
+- Key benefits: High-performance vector similarity search, multiple distance metrics, real-time indexing
+- Implementation: RAG systems, semantic memory, content discovery, personalized recommendations
+- Queries: K-nearest neighbor search, range queries, hybrid filtering with metadata
+- [Learn about Redis Vector Sets →](/develop/data-types/vector/)
+
+### Redis sorted sets for rankings and priorities
+
+- Use case: Dynamic scoring systems, priority queues, leaderboards, time-series data
+- Key benefits: O(log N) insertions, range queries by score/rank, atomic score updates
+- Implementation: Real-time recommendation scoring, task prioritization, performance analytics
+- Queries: Range by score, rank, lexicographical order, and complex aggregations
+- [Learn about Redis Sorted Sets →](/develop/data-types/sorted-sets/)
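+
+A minimal redis-py sketch of a per-user recommendation scoreboard (keys, members, and scores are illustrative):
+
+```python
+import redis
+
+r = redis.Redis(decode_responses=True)
+board = "agent:recs:user:1001"  # hypothetical per-user score board
+
+# Score candidate items, then adjust scores atomically as new signals arrive
+r.zadd(board, {"movie:603": 9.2, "movie:120": 7.4, "movie:550": 8.8})
+r.zincrby(board, 0.5, "movie:120")  # e.g. the user just clicked this item
+
+# Top three candidates, highest score first
+print(r.zrange(board, 0, 2, desc=True, withscores=True))
+```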
+
+## Reliability features
+
+Production-ready agents include built-in reliability features:
+
+### Error handling
+
+- Gracefully handle API failures and unexpected inputs
+- Provide helpful error messages when things go wrong
+- Continue functioning even when some components fail
+
+### Retry logic
+
+- Automatically retry failed operations with exponential backoff
+- Handle temporary network issues and rate limiting
+- Ensure important operations complete successfully
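+
+A minimal plain-Python sketch of this pattern (the `with_retries` helper is illustrative and not part of the generated templates):
+
+```python
+import random
+import time
+
+def with_retries(fn, attempts=4, base_delay=0.5):
+    """Call fn(), retrying with exponential backoff plus jitter on failure."""
+    for attempt in range(1, attempts + 1):
+        try:
+            return fn()
+        except Exception as exc:
+            if attempt == attempts:
+                raise  # out of retries; surface the error to the caller
+            delay = base_delay * (2 ** (attempt - 1)) + random.uniform(0, 0.1)
+            print(f"Attempt {attempt} failed ({exc}); retrying in {delay:.1f}s")
+            time.sleep(delay)
+
+# Example: wrap a rate-limited LLM or Redis call
+# response = with_retries(lambda: client.chat.completions.create(...))
+```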
+
+### Logging and monitoring
+
+- Track what your agent does for debugging and improvement
+- Monitor performance metrics like response times
+- Log errors and unusual behavior for investigation
+
+### Performance optimization
+
+- Cache frequently accessed information
+- Use efficient data structures for fast retrieval
+- Scale resources based on demand
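+
+A minimal redis-py sketch of caching an expensive lookup with a TTL (the key scheme and `compute` callback are illustrative):
+
+```python
+import json
+
+import redis
+
+r = redis.Redis(decode_responses=True)
+
+def cached_lookup(query, compute, ttl=300):
+    """Return a cached answer for `query`, computing and caching it on a miss."""
+    cache_key = f"agent:cache:{query}"  # hypothetical cache key scheme
+    hit = r.get(cache_key)
+    if hit is not None:
+        return json.loads(hit)
+    result = compute(query)                       # expensive call (LLM, API, database)
+    r.set(cache_key, json.dumps(result), ex=ttl)  # expire after `ttl` seconds
+    return result
+```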
+
+## Production deployment considerations
+
+### Monitoring and observability
+
+- Agent performance metrics: Response times, success rates, user satisfaction
+- Redis metrics: Memory usage, connection counts, operation latencies
+- LLM usage tracking: Token consumption, API costs, rate limiting
+- Business metrics: Task completion rates, user engagement, conversion
+
+### Security and privacy
+
+- Data encryption: Encrypt sensitive data at rest and in transit
+- Access controls: Implement proper authentication and authorization
+- Data retention: Automatic cleanup of personal data per regulations
+- Audit logging: Track all data access and modifications
+
+### Scaling strategies
+
+- Horizontal scaling: Multiple agent instances with shared Redis state
+- Load balancing: Distribute requests across agent instances
+- Redis clustering: Scale data storage across multiple nodes
+- Caching layers: CDN for static content, Redis for dynamic data
+- [Learn about Redis scaling →](/operate/rs/clusters/)
+
+### Cost optimization
+
+- LLM cost management: Use appropriate models for different tasks
+- Redis memory optimization: Efficient data structures and TTL policies
+- API rate limiting: Prevent excessive external API calls
+- Resource monitoring: Track and optimize compute and storage costs
+- [Redis performance optimization →](/operate/rs/administering/database-operations/memory-performance/)
+
+---
+
+## Key takeaways
+
+
+
+**What makes agents different:**
+Agents maintain memory, plan multi-step tasks, and learn from interactions—unlike simple chatbots.
+
+**Why Redis is perfect:**
+Sub-millisecond data access, built-in vector search, and flexible data structures designed for agent workflows.
+
+**What you can build:**
+Conversational assistants, recommendation engines, and complex multi-agent systems.
+
+
+
+## Next steps
+
+Ready to build your AI agent with Redis?
+
+**Get started:**
+- [Use the agent builder](/develop/ai/agent-builder/) to generate your code and get started
+- [Redis quick start guide](/develop/get-started/) for setting up Redis
+
+**Learn more:**
+- [Redis Vector Search documentation](/develop/interact/search-and-query/advanced-concepts/vectors/)
+- [RedisVL Python library](/develop/clients/redisvl/) for vector operations and AI workflows
+- [Redis data structures guide](/develop/data-types/)
+- [Redis client libraries](/develop/clients/) for your programming language
+
+**Deploy and scale:**
+- [Redis Cloud](/operate/rc/) for managed Redis hosting
+- [Redis Enterprise](/operate/rs/) for on-premises deployment
+- [Performance optimization](/operate/rs/administering/database-operations/memory-performance/) best practices
diff --git a/layouts/_default/baseof.html b/layouts/_default/baseof.html
index d6fb8548ff..73478fac75 100644
--- a/layouts/_default/baseof.html
+++ b/layouts/_default/baseof.html
@@ -46,7 +46,9 @@
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','{{ .Site.Params.tagManagerId }}');
{{ end -}}
-
+
+
+
{{ if .Params.math }}
{{ partial "mathjax.html" }}
diff --git a/layouts/shortcodes/agent-builder.html b/layouts/shortcodes/agent-builder.html
new file mode 100644
index 0000000000..2c0ec21ee2
--- /dev/null
+++ b/layouts/shortcodes/agent-builder.html
@@ -0,0 +1,94 @@
+
+
+
+
+
+
+
+
Build Your Redis AI Agent
+
Tell us what you want to build and we'll generate the code for you
+
+
+
+
+
+
+
+
+
+
+
+
Hi! I'll help you build a Redis AI agent. Let's start with what type of agent you want to create.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Generated Agent Code
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/static/code/agent-templates/python/conversational_agent.py b/static/code/agent-templates/python/conversational_agent.py
new file mode 100644
index 0000000000..7e24783346
--- /dev/null
+++ b/static/code/agent-templates/python/conversational_agent.py
@@ -0,0 +1,137 @@
+'''
+Redis Conversational Agent
+Uses RedisVL Semantic Message History to manage conversation history
+
+To run this code:
+ Install dependencies:
+    pip install redisvl[all] redis openai
+
+ Set environment variables:
+ export LLM_API_KEY=your_${formData.llmModel.toLowerCase()}_api_key
+ export LLM_API_BASE_URL=your_${formData.llmModel.toLowerCase()}_api_base_url
+ (optional - default: ${CONFIG.models[formData.llmModel].baseUrl})
+ export LLM_MODEL=your_${formData.llmModel.toLowerCase()}_model
+ (optional - default: ${CONFIG.models[formData.llmModel].defaultModel})
+ export REDIS_HOST=your_redis_host
+ export REDIS_PORT=your_redis_port
+ export REDIS_PASSWORD=your_redis_password
+'''
+
+from redisvl.extensions.message_history import SemanticMessageHistory
+import redis
+import os
+import openai
+
+class ConversationalAgent:
+ def __init__(self, session_name="chat"):
+ # Get API key from environment variables
+ self.llm_api_key = os.getenv('LLM_API_KEY')
+ if not self.llm_api_key:
+ raise ValueError("LLM_API_KEY environment variable is required")
+ self.llm_base_url = os.getenv('LLM_API_BASE_URL', '${CONFIG.models[formData.llmModel].baseUrl}')
+ self.llm_model = os.getenv('LLM_MODEL', '${CONFIG.models[formData.llmModel].defaultModel}')
+
+ # Connect to Redis
+ try:
+ self.redis_client = redis.Redis(
+ host=os.getenv('REDIS_HOST', 'localhost'),
+ port=int(os.getenv('REDIS_PORT', 6379)),
+ username=os.getenv('REDIS_USERNAME', 'default'),
+ password=os.getenv('REDIS_PASSWORD', ''),
+ decode_responses=True
+ )
+ # Test Redis connection
+ self.redis_client.ping()
+ print("Connected to Redis successfully")
+
+ except redis.ConnectionError as e:
+ print(f"Failed to connect to Redis: {e}")
+ print("Please check your Redis connection settings and ensure Redis is running.")
+ raise
+ except Exception as e:
+ print(f"Redis connection error: {e}")
+ raise
+
+ # Initialize LLM client with error handling
+ try:
+ self.client = openai.OpenAI(api_key=self.llm_api_key, base_url=self.llm_base_url)
+ # Test LLM connection with a simple call
+ test_response = self.client.chat.completions.create(
+ model=self.llm_model,
+ messages=[{"role": "user", "content": "Hello"}],
+ max_tokens=5
+ )
+ print("Connected to LLM successfully")
+
+ except openai.AuthenticationError:
+ print("LLM authentication failed. Please check your API key.")
+ raise
+ except Exception as e:
+ print(f"LLM connection error: {e}")
+ raise
+
+ # Initialize session manager
+ self.session_manager = SemanticMessageHistory(
+ name=session_name,
+ redis_client=self.redis_client
+ )
+
+ def chat(self, user_input: str, session_tag: str = None) -> str:
+ # Get relevant conversation history
+ self.session_manager.set_distance_threshold(0.9)
+ context = self.session_manager.get_relevant(user_input, top_k=8)
+
+ # Build messages with context
+ messages = [{"role": "system", "content": "You are a helpful assistant that will answer questions based on the conversation history."}]
+ messages.extend(context)
+ messages.append({"role": "user", "content": user_input})
+
+ # Get LLM response
+ try:
+ response = self.client.chat.completions.create(
+ model=self.llm_model,
+ messages=messages
+ )
+ except Exception as e:
+ print(f"Error getting LLM response: {e}")
+ return "Sorry, I'm having trouble understanding your question. Please try again later."
+
+ assistant_response = response.choices[0].message.content
+
+ # Store the conversation
+ try:
+ self.session_manager.add_messages([
+ {"role": "user", "content": user_input},
+ {"role": "assistant", "content": assistant_response}
+ ], session_tag)
+ except Exception as e:
+ print(f"Error storing conversation: {e}")
+
+ return assistant_response
+
+if __name__ == "__main__":
+ try:
+ agent = ConversationalAgent()
+ print(agent.chat("Tell me about yourself."))
+ while True:
+ try:
+ prompt = input('Enter a prompt: ')
+
+ if prompt.lower() in ['quit', 'exit', 'bye']:
+                print("Thanks for chatting! Goodbye!")
+ break
+
+ print(agent.chat(prompt))
+ except KeyboardInterrupt:
+ print("\n\nGoodbye!")
+ break
+ except Exception as e:
+ print(f"An error occurred: {e}")
+ print("Please try again or type 'quit' to exit.")
+ except ValueError as e:
+ print(f"Configuration error: {e}")
+ print("Please check your environment variables and try again.")
+ exit(1)
+ except Exception as e:
+ print(f"Failed to initialize the conversational agent: {e}")
+ exit(1)
\ No newline at end of file
diff --git a/static/code/agent-templates/python/recommendation_agent.py b/static/code/agent-templates/python/recommendation_agent.py
new file mode 100644
index 0000000000..0e30f42e70
--- /dev/null
+++ b/static/code/agent-templates/python/recommendation_agent.py
@@ -0,0 +1,382 @@
+'''
+Redis Recommendation Engine
+
+To run this code:
+ Install dependencies:
+ pip install pandas redisvl[all] redis openai
+
+ Set environment variables:
+ export LLM_API_KEY=your_${formData.llmModel.toLowerCase()}_api_key
+ export LLM_API_BASE_URL=your_${formData.llmModel.toLowerCase()}_api_base_url
+ (optional - default: ${CONFIG.models[formData.llmModel].baseUrl})
+ export LLM_MODEL=your_${formData.llmModel.toLowerCase()}_model
+ (optional - default: ${CONFIG.models[formData.llmModel].defaultModel})
+ export REDIS_HOST=your_redis_host
+ export REDIS_PORT=your_redis_port
+ export REDIS_PASSWORD=your_redis_password
+
+The datasets are downloadable from here:
+
+- https://redis-ai-resources.s3.us-east-2.amazonaws.com/recommenders/datasets/collaborative-filtering/ratings_small.csv and
+- https://redis-ai-resources.s3.us-east-2.amazonaws.com/recommenders/datasets/collaborative-filtering/movies_metadata.csv
+
+Download those files and place them in the 'datasets/collaborative_filtering/' directory, local to your code.
+'''
+import os
+
+import openai
+import pandas as pd
+import redis
+
+from redisvl.schema import IndexSchema
+from redisvl.index import SearchIndex
+from redisvl.query import FilterQuery
+from redisvl.query.filter import Num
+
+class RecommendationAgent:
+ def __init__(self, session_name="movie_recommendations"):
+ # Validate environment variables
+ self.llm_api_key = os.getenv('LLM_API_KEY')
+ if not self.llm_api_key:
+ raise ValueError("LLM_API_KEY environment variable is required")
+
+ self.llm_base_url = os.getenv('LLM_API_BASE_URL', '${CONFIG.models[formData.llmModel].baseUrl}')
+ self.llm_model = os.getenv('LLM_MODEL', '${CONFIG.models[formData.llmModel].defaultModel}')
+
+ # Connect to Redis with error handling
+ try:
+ self.redis_client = redis.Redis(
+ host=os.getenv('REDIS_HOST', 'localhost'),
+ port=int(os.getenv('REDIS_PORT', 6379)),
+ username=os.getenv('REDIS_USERNAME', 'default'),
+ password=os.getenv('REDIS_PASSWORD', ''),
+ decode_responses=True,
+ socket_connect_timeout=5,
+ socket_timeout=5
+ )
+ # Test Redis connection
+ self.redis_client.ping()
+ print("Connected to Redis successfully")
+
+ except redis.ConnectionError as e:
+ print(f"Failed to connect to Redis: {e}")
+ print("Please check your Redis connection settings and ensure Redis is running.")
+ raise
+ except Exception as e:
+ print(f"Redis connection error: {e}")
+ raise
+
+ # Initialize LLM client with error handling
+ try:
+ self.client = openai.OpenAI(api_key=self.llm_api_key, base_url=self.llm_base_url)
+ # Test LLM connection with a simple call
+ test_response = self.client.chat.completions.create(
+ model=self.llm_model,
+ messages=[{"role": "user", "content": "Hello"}],
+ max_tokens=5
+ )
+ print("Connected to LLM successfully")
+
+ except openai.AuthenticationError:
+ print("LLM authentication failed. Please check your API key.")
+ raise
+ except Exception as e:
+ print(f"LLM connection error: {e}")
+ raise
+
+ # Initialize the movie index
+ self.index = None
+ self.movies_df = None
+ self._setup_movie_index()
+
+ def _setup_movie_index(self):
+ """Load movie data and create Redis index"""
+ try:
+ print("Loading movie datasets...")
+
+ # Check if data files exist
+ ratings_file = 'datasets/collaborative_filtering/ratings_small.csv'
+ movies_file = 'datasets/collaborative_filtering/movies_metadata.csv'
+
+ if not os.path.exists(ratings_file):
+ raise FileNotFoundError(f"Ratings dataset not found: {ratings_file}")
+ if not os.path.exists(movies_file):
+ raise FileNotFoundError(f"Movies dataset not found: {movies_file}")
+
+ # Load and prepare data
+ ratings_df = pd.read_csv(ratings_file)[['userId', 'movieId', 'rating']]
+ movies_df = pd.read_csv(movies_file)[['id', 'title', 'genres', 'revenue']].dropna()
+
+ if ratings_df.empty or movies_df.empty:
+ raise ValueError("One or more datasets are empty")
+
+ print(f"Loaded {len(ratings_df)} ratings and {len(movies_df)} movies")
+
+ # Calculate movie popularity metrics
+ movie_stats = ratings_df.groupby('movieId').agg({
+ 'rating': ['count', 'mean']
+ }).round(2)
+ movie_stats.columns = ['rating_count', 'avg_rating']
+ movie_stats['popularity_score'] = movie_stats['rating_count'] * movie_stats['avg_rating']
+ movie_stats = movie_stats.reset_index()
+
+ # Merge with movie metadata
+ movies_df['movieId'] = movies_df['id'].astype(str)
+ movie_stats['movieId'] = movie_stats['movieId'].astype(str)
+ self.movies_df = movies_df.merge(movie_stats, on='movieId', how='inner')
+
+ if self.movies_df.empty:
+ raise ValueError("No movies found after merging ratings and metadata")
+
+ print(f"Processed {len(self.movies_df)} movies with ratings")
+
+ # Create RedisVL index for popularity-based search
+ schema = IndexSchema.from_dict({
+ 'index': {'name': 'movies', 'prefix': 'movie', 'storage_type': 'json'},
+ 'fields': [
+ {'name': 'movieId', 'type': 'tag'},
+ {'name': 'title', 'type': 'text'},
+ {'name': 'genres', 'type': 'tag'},
+ {'name': 'revenue', 'type': 'numeric'},
+ {'name': 'rating_count', 'type': 'numeric'},
+ {'name': 'avg_rating', 'type': 'numeric'},
+ {'name': 'popularity_score', 'type': 'numeric'}
+ ]
+ })
+
+ print("Creating Redis search index...")
+ self.index = SearchIndex(schema, redis_client=self.redis_client)
+ self.index.create(overwrite=True)
+ self.index.load(self.movies_df.to_dict(orient='records'))
+
+ print("Movie recommendation system initialized successfully!")
+
+ except FileNotFoundError as e:
+ print(f"Data files not found: {e}")
+ print("Please ensure the movie datasets are available in the 'datasets/collaborative_filtering/' directory")
+ print("Expected files:")
+ print(" - datasets/collaborative_filtering/ratings_small.csv")
+ print(" - datasets/collaborative_filtering/movies_metadata.csv")
+ self.index = None
+ except pd.errors.EmptyDataError as e:
+ print(f"Data file is empty or corrupted: {e}")
+ self.index = None
+ except Exception as e:
+ print(f"Error setting up movie index: {e}")
+ self.index = None
+
+ def get_popular_movies(self, filters=None, num_results=10, sort_by='popularity_score', sort_order='desc'):
+ """Get popular movies based on filters and sorting criteria
+
+ Args:
+ filters: Filter expression for the query
+ num_results: Number of results to return
+ sort_by: Field to sort by ('popularity_score', 'avg_rating', 'rating_count', 'revenue')
+ sort_order: Sort order ('asc' for ascending, 'desc' for descending)
+ """
+ if not self.index:
+ return []
+
+ # Create the query
+ query = FilterQuery(
+ filter_expression=filters,
+ return_fields=['title', 'genres', 'rating_count', 'avg_rating', 'popularity_score'],
+ num_results=num_results
+ )
+
+ # Add sorting with specified order
+ asc_order = sort_order.lower() == 'asc'
+ query.sort_by(sort_by, asc=asc_order)
+
+ results = self.index.query(query)
+ return [(r['title'], r['genres'], r['rating_count'], r['avg_rating'], r['popularity_score'])
+ for r in results]
+
+ def get_top_rated_movies(self, min_reviews=50, num_results=10, sort_order='desc'):
+ """Get highest rated movies with minimum review count"""
+ filters = Num('rating_count') >= min_reviews
+ return self.get_popular_movies(filters=filters, num_results=num_results, sort_by='avg_rating', sort_order=sort_order)
+
+ def get_blockbuster_movies(self, min_revenue=50_000_000, num_results=10, sort_order='desc'):
+ """Get blockbuster movies with high revenue"""
+ filters = Num('revenue') >= min_revenue
+ return self.get_popular_movies(filters=filters, num_results=num_results, sort_by='revenue', sort_order=sort_order)
+
+ def get_hidden_gems(self, max_reviews=200, min_rating=7.5, num_results=10, sort_order='desc'):
+ """Get lesser-known but highly rated movies"""
+ filters = (Num('rating_count') <= max_reviews) & (Num('avg_rating') >= min_rating)
+ return self.get_popular_movies(filters=filters, num_results=num_results, sort_by='avg_rating', sort_order=sort_order)
+
+ def get_movies_by_genre(self, genre, num_results=10, sort_by='popularity_score', sort_order='desc'):
+ """Get movies filtered by genre (post-processing)"""
+ all_movies = self.get_popular_movies(num_results=num_results*3, sort_by=sort_by, sort_order=sort_order)
+ genre_movies = []
+
+ for movie in all_movies:
+ movie_genres = movie[1].lower() if movie[1] else ""
+ if genre.lower() in movie_genres:
+ genre_movies.append(movie)
+ if len(genre_movies) >= num_results:
+ break
+
+ return genre_movies
+
+ def cleanup(self):
+ """Clean up Redis index"""
+ if self.index:
+ self.index.clear()
+
+ def _parse_user_query(self, user_query: str) -> dict:
+ """Use LLM to parse user query and extract recommendation parameters"""
+ try:
+ system_prompt = """You are a movie recommendation assistant. Parse the user's query and extract relevant parameters for movie filtering.
+
+Return a JSON object with these fields (use null if not specified):
+- "genres": array of genre strings (e.g., ["Action", "Comedy"])
+- "min_rating": minimum average rating (0-10)
+- "min_reviews": minimum number of reviews
+- "max_results": number of recommendations (default 5, max 10)
+- "sort_by": "popularity_score", "avg_rating", "rating_count", or "revenue"
+- "sort_order": "desc" for descending (highest first), "asc" for ascending (lowest first). Default "desc"
+- "revenue_filter": "blockbuster" for high-revenue movies, null otherwise
+
+Examples:
+- "action movies" -> {"genres": ["Action"], "min_rating": null, "min_reviews": null, "max_results": 5, "sort_by": "popularity_score", "sort_order": "desc", "revenue_filter": null}
+- "highly rated comedies with at least 100 reviews" -> {"genres": ["Comedy"], "min_rating": 7.0, "min_reviews": 100, "max_results": 5, "sort_by": "avg_rating", "sort_order": "desc", "revenue_filter": null}
+- "lowest rated movies" -> {"genres": null, "min_rating": null, "min_reviews": null, "max_results": 5, "sort_by": "avg_rating", "sort_order": "asc", "revenue_filter": null}
+- "popular blockbuster movies" -> {"genres": null, "min_rating": null, "min_reviews": 50, "max_results": 5, "sort_by": "popularity_score", "sort_order": "desc", "revenue_filter": "blockbuster"}"""
+
+ response = self.client.chat.completions.create(
+ model=self.llm_model,
+ messages=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_query}
+ ],
+ temperature=0.1
+ )
+
+ import json
+ params = json.loads(response.choices[0].message.content)
+ return params
+
+ except Exception as e:
+ print(f"Error parsing query: {e}")
+ # Return default parameters
+ return {"genres": None, "min_rating": None, "min_reviews": None, "max_results": 5, "sort_by": "popularity_score", "sort_order": "desc", "revenue_filter": None}
+
+ def recommend_movies(self, user_query: str) -> str:
+ """Process user query and return movie recommendations"""
+ if not self.index:
+ return "Sorry, the movie database is not available. Please check that the data files are present."
+
+ try:
+ # Parse user query
+ params = self._parse_user_query(user_query)
+
+ # Build filters based on parsed parameters
+ filters = []
+
+ if params.get("min_rating"):
+ filters.append(Num('avg_rating') >= params["min_rating"])
+
+ if params.get("min_reviews"):
+ filters.append(Num('rating_count') >= params["min_reviews"])
+
+ if params.get("revenue_filter") == "blockbuster":
+ filters.append(Num('revenue') > 30_000_000)
+
+ # Combine filters
+ combined_filter = None
+ if filters:
+ combined_filter = filters[0]
+ for f in filters[1:]:
+ combined_filter = combined_filter & f
+
+ # Get recommendations
+ num_results = min(params.get("max_results", 5), 10)
+ sort_by = params.get("sort_by", "popularity_score")
+ sort_order = params.get("sort_order", "desc")
+
+ movies = self.get_popular_movies(
+ filters=combined_filter,
+ num_results=num_results,
+ sort_by=sort_by,
+ sort_order=sort_order
+ )
+
+ # Filter by genre if specified (post-processing since RedisVL genre filtering can be complex)
+ if params.get("genres"):
+ genre_keywords = [g.lower() for g in params["genres"]]
+ filtered_movies = []
+ for movie in movies:
+ movie_genres = movie[1].lower() if movie[1] else ""
+ if any(keyword in movie_genres for keyword in genre_keywords):
+ filtered_movies.append(movie)
+ movies = filtered_movies[:num_results]
+
+ if not movies:
+ return "Sorry, no movies found matching your criteria. Try adjusting your preferences."
+
+ # Format response
+ response = f"Based on your request '{user_query}', here are my recommendations:\n\n"
+ for i, (title, genres, rating_count, avg_rating, popularity_score) in enumerate(movies, 1):
+ response += f"{i}. {title}\n"
+ response += f" Genres: {genres}\n"
+ response += f" Average Rating: {float(avg_rating):.1f}/10 ({int(rating_count)} reviews)\n"
+ response += f" Popularity Score: {float(popularity_score):.1f}\n\n"
+
+ return response
+
+ except Exception as e:
+ return f"Sorry, there was an error processing your request: {e}"
+
+if __name__ == "__main__":
+ try:
+ print("Initializing Redis Movie Recommendation Agent...")
+ agent = RecommendationAgent()
+
+ if not agent.index:
+ print("Failed to initialize the recommendation system.")
+ print("Please check your data files and Redis connection, then try again.")
+ exit(1)
+
+ print("\nWelcome to the Redis Movie Recommendation Agent!")
+ print("Ask me for movie recommendations and I'll help you find something great to watch.")
+ print("Type 'quit' or 'exit' to stop.\n")
+
+ # Demo some initial recommendations
+ print("Here's a quick demo of what I can do:")
+ print(agent.recommend_movies("Show me some popular movies"))
+
+ # Interactive loop
+ while True:
+ try:
+ user_input = input("\nWhat kind of movies are you looking for? ")
+
+ if user_input.lower() in ['quit', 'exit', 'bye']:
+ print("Thanks for using the movie recommendation agent! Goodbye!")
+ break
+
+ if user_input.strip():
+ response = agent.recommend_movies(user_input)
+ print(f"\n{response}")
+ else:
+ print("Please enter a movie preference or type 'quit' to exit.")
+
+ except KeyboardInterrupt:
+ print("\n\nGoodbye!")
+ break
+ except Exception as e:
+ print(f"An error occurred: {e}")
+ print("Please try again or type 'quit' to exit.")
+
+ # Cleanup
+ agent.cleanup()
+
+ except ValueError as e:
+ print(f"Configuration error: {e}")
+ print("Please check your environment variables and try again.")
+ exit(1)
+ except Exception as e:
+ print(f"Failed to initialize the recommendation agent: {e}")
+ exit(1)
diff --git a/static/css/agent-builder.css b/static/css/agent-builder.css
new file mode 100644
index 0000000000..c9de2b8a99
--- /dev/null
+++ b/static/css/agent-builder.css
@@ -0,0 +1,41 @@
+pre code.hljs{
+ display:block;overflow-x:auto;padding:1em
+}
+
+
+code.hljs{padding:3px 5px}
+
+.hljs{background:#f3f3f3;color:#444}
+
+.hljs-comment{
+ color:#7d8282;
+ font-style:italic;
+}
+
+.hljs-punctuation,.hljs-tag{color:#444a}
+
+.hljs-tag .hljs-attr,.hljs-tag .hljs-name{color:#444}
+
+.hljs-attribute,.hljs-doctag,.hljs-keyword,.hljs-meta .hljs-keyword,.hljs-name,.hljs-selector-tag{font-weight:700}
+
+.hljs-deletion,.hljs-number,.hljs-quote,.hljs-selector-class,.hljs-selector-id,.hljs-string,.hljs-template-tag,.hljs-type{
+ color:rgb(116, 145, 251)
+}
+
+.hljs-section,.hljs-title{color:rgba(248, 101, 101, 0.777);font-weight:700}
+
+.hljs-link,.hljs-operator,.hljs-regexp,.hljs-selector-attr,.hljs-selector-pseudo,.hljs-symbol,.hljs-template-variable,.hljs-variable{
+ color:rgba(248, 101, 101, 0.777)
+}
+
+.hljs-literal{color:#695}
+
+.hljs-addition,.hljs-built_in,.hljs-bullet,.hljs-code{color:#75e703c7}
+
+.hljs-meta{color:#1f7199}
+
+.hljs-meta .hljs-string{color:#38a}
+
+.hljs-emphasis{font-style:italic}
+
+.hljs-strong{font-weight:700}
diff --git a/static/images/ai_agent/ai-agent-architecture-diagram.svg b/static/images/ai_agent/ai-agent-architecture-diagram.svg
new file mode 100644
index 0000000000..bb1e4dcacd
--- /dev/null
+++ b/static/images/ai_agent/ai-agent-architecture-diagram.svg
@@ -0,0 +1,161 @@
+
diff --git a/static/images/ai_agent/simple-processing-cycle.svg b/static/images/ai_agent/simple-processing-cycle.svg
new file mode 100644
index 0000000000..4f040e9183
--- /dev/null
+++ b/static/images/ai_agent/simple-processing-cycle.svg
@@ -0,0 +1,80 @@
+
diff --git a/static/js/agent-builder.js b/static/js/agent-builder.js
new file mode 100644
index 0000000000..0acef66039
--- /dev/null
+++ b/static/js/agent-builder.js
@@ -0,0 +1,1428 @@
+/**
+ * Redis AI Agent Builder JavaScript
+ * Handles chat interface and code generation
+ */
+
+(function() {
+ 'use strict';
+
+ // Configuration and conversation flow
+ const CONFIG = {
+ agentTypes: {
+ recommendation: {
+ name: "Recommendation Engine",
+ description: "Creates an agent that recommends products based on user queries using Redis vector search and similarity matching.",
+ features: ["Vector similarity search", "Product recommendations", "User preference learning"],
+ keywords: ["recommendation", "recommend", "product", "suggest", "ecommerce", "shopping"]
+ },
+ conversational: {
+ name: "Conversational Assistant",
+ description: "A chatbot that maintains conversation history using semantic message history and provides contextual responses.",
+ features: ["Conversation memory", "Context awareness", "Multi-turn dialogue"],
+ keywords: ["chat", "conversation", "assistant", "bot", "chatbot", "talk", "dialogue"]
+ }
+ },
+ languages: {
+ python: {
+ name: "Python",
+ description: "Python with redis-py and popular AI libraries. Great for beginners and rapid prototyping.",
+ dependencies: ["redis", "openai", "numpy", "python-dotenv"],
+ keywords: ["python", "py", "beginner", "simple", "data science", "ml", "machine learning"]
+ },
+ javascript: {
+ name: "JavaScript (Node.js)",
+ description: "Node.js with redis client and AI SDKs. Perfect for web applications and APIs.",
+ dependencies: ["redis", "openai", "dotenv", "express"],
+ keywords: ["javascript", "js", "node", "nodejs", "web", "api", "frontend"]
+ },
+ java: {
+ name: "Java",
+ description: "Java with Jedis Redis client and enterprise-grade AI integrations.",
+ dependencies: ["jedis", "okhttp", "jackson", "slf4j"],
+ keywords: ["java", "enterprise", "spring", "scalable", "production"]
+ },
+ csharp: {
+ name: "C#",
+ description: "C# with StackExchange.Redis and .NET AI libraries for enterprise applications.",
+ dependencies: ["StackExchange.Redis", "Microsoft.Extensions.AI", "Newtonsoft.Json"],
+ keywords: ["c#", "csharp", "dotnet", ".net", "microsoft", "enterprise"]
+ }
+ },
+ models: {
+ openai: {
+ name: "OpenAI (GPT-4, GPT-3.5)",
+ description: "Industry-leading models with excellent performance. Requires API key and usage-based pricing.",
+ models: ["gpt-4", "gpt-3.5-turbo", "text-embedding-ada-002"],
+ keywords: ["openai", "gpt", "gpt-4", "chatgpt", "popular", "best"],
+ defaultModel: "gpt-4",
+ baseUrl: "https://api.openai.com/v1/"
+ },
+ anthropic: {
+ name: "Anthropic (Claude)",
+ description: "Claude models known for safety and reasoning capabilities. Requires API key.",
+ models: ["claude-3-opus", "claude-3-sonnet", "claude-instant"],
+ keywords: ["anthropic", "claude", "safe", "reasoning", "ethical"],
+ defaultModel: "claude-3-5-sonnet-latest",
+ baseUrl: "https://api.anthropic.com/v1/"
+ },
+ llama3: {
+ name: "Llama 3",
+ description: "Open-source model that can run locally or on your infrastructure. No API costs.",
+ models: ["llama3:8b", "llama3:70b"],
+ keywords: ["llama", "llama3", "open source", "free", "local", "self-hosted"],
+ defaultModel: "llama3:latest",
+ baseUrl: "http://localhost:11434/v1/"
+ }
+ }
+ };
+
+ // Conversation state
+ let conversationState = {
+ step: 'agent-type', // agent-type, language, model, generate
+ selections: {},
+ currentSuggestions: []
+ };
+
+ // Code chat state
+ let codeChatState = {
+ apiKey: null,
+ hasAskedForKey: false,
+ conversationHistory: []
+ };
+
+ // DOM elements
+ let elements = {};
+
+ // Initialize the agent builder
+ function init() {
+ // Wait for DOM to be ready
+ if (document.readyState === 'loading') {
+ document.addEventListener('DOMContentLoaded', init);
+ return;
+ }
+
+ // Get DOM elements
+ elements = {
+ chatContainer: document.getElementById('chat-container'),
+ chatMessages: document.getElementById('chat-messages'),
+ chatInput: document.getElementById('chat-input'),
+ sendButton: document.getElementById('send-button'),
+ suggestionsDropdown: document.getElementById('suggestions-dropdown'),
+ codeSection: document.getElementById('generated-code-section'),
+ generatedCode: document.getElementById('generated-code'),
+ copyBtn: document.getElementById('copy-code-btn'),
+ downloadBtn: document.getElementById('download-code-btn'),
+ tryJupyterBtn: document.getElementById('try-jupyter-btn'),
+ startAgainBtn: document.getElementById('start-again-btn')
+ };
+
+ // Check if elements exist (shortcode might not be on this page)
+ if (!elements.chatContainer) return;
+
+ // Check if Markdown library is available
+ if (!window.marked) {
+ console.warn('Marked library not loaded - chat messages will use plain text formatting');
+ }
+
+ // Set up event listeners
+ setupEventListeners();
+ }
+
+ function setupEventListeners() {
+ // Chat input events
+ elements.chatInput.addEventListener('input', handleInputChange);
+ elements.chatInput.addEventListener('keydown', handleKeyDown);
+ elements.chatInput.addEventListener('keydown', preventSearchModalOnSlash);
+ elements.sendButton.addEventListener('click', handleSendMessage);
+
+ // Suggestion chip clicks
+ elements.chatMessages.addEventListener('click', handleSuggestionClick);
+
+ // Suggestions dropdown
+ elements.suggestionsDropdown.addEventListener('click', handleSuggestionSelect);
+
+ // Code actions will be attached when the code section becomes visible
+ // This prevents issues with hidden elements during initial page load
+ }
+
+ // Chat interface functions
+ function handleInputChange() {
+ const input = elements.chatInput.value.trim();
+ elements.sendButton.disabled = input.length === 0;
+
+ if (input.length > 0) {
+ showSuggestions(input);
+ } else {
+ hideSuggestions();
+ }
+ }
+
+ function handleKeyDown(e) {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault();
+ handleSendMessage();
+ } else if (e.key === 'Escape') {
+ hideSuggestions();
+ }
+ }
+
+ function preventSearchModalOnSlash(event) {
+ // Prevent the global search modal from opening when typing "/" in the chat input
+ if (event.key === '/') {
+ event.stopPropagation();
+ }
+ }
+
+ function handleSendMessage() {
+ const input = elements.chatInput.value.trim();
+ if (!input) return;
+
+ // Add user message
+ addMessage(input, 'user');
+
+ // Clear input
+ elements.chatInput.value = '';
+ elements.sendButton.disabled = true;
+ hideSuggestions();
+
+ // Process the message
+ processUserMessage(input);
+ }
+
+ function handleSuggestionClick(e) {
+ const suggestionChip = e.target.closest('.suggestion-chip');
+ if (suggestionChip) {
+ const suggestion = suggestionChip.dataset.suggestion;
+ processSelection(suggestion);
+ }
+ }
+
+ function handleSuggestionSelect(e) {
+ const suggestionItem = e.target.closest('.suggestion-item');
+ if (suggestionItem) {
+ const suggestion = suggestionItem.dataset.value;
+ elements.chatInput.value = suggestion;
+ hideSuggestions();
+ handleSendMessage();
+ }
+ }
+
+ function addMessage(content, type, suggestions = null) {
+ const messageDiv = document.createElement('div');
+ messageDiv.className = `chat-message ${type}-message`;
+
+ const avatar = document.createElement('div');
+ avatar.className = 'message-avatar';
+
+ if (type === 'bot') {
+ avatar.innerHTML = `
+
+ `;
+ } else {
+ avatar.innerHTML = `
+
+ `;
+ }
+
+ const messageContent = document.createElement('div');
+ messageContent.className = 'message-content';
+ messageContent.innerHTML = `<p>${content}</p>`;
+
+ if (suggestions) {
+ const chipsDiv = document.createElement('div');
+ chipsDiv.className = 'suggestion-chips';
+ suggestions.forEach(suggestion => {
+ const chip = document.createElement('button');
+ chip.className = 'suggestion-chip';
+ chip.dataset.suggestion = suggestion.value;
+ chip.textContent = suggestion.label;
+ chipsDiv.appendChild(chip);
+ });
+ messageContent.appendChild(chipsDiv);
+ }
+
+ messageDiv.appendChild(avatar);
+ messageDiv.appendChild(messageContent);
+ elements.chatMessages.appendChild(messageDiv);
+
+ // Scroll to bottom
+ elements.chatMessages.scrollTop = elements.chatMessages.scrollHeight;
+ }
+
+ function showSuggestions(input) {
+ const suggestions = getSuggestionsForCurrentStep(input);
+ if (suggestions.length === 0) {
+ hideSuggestions();
+ return;
+ }
+
+ const suggestionsList = elements.suggestionsDropdown.querySelector('.suggestions-list');
+ suggestionsList.innerHTML = '';
+
+ suggestions.forEach(suggestion => {
+ const item = document.createElement('div');
+ item.className = 'suggestion-item';
+ item.dataset.value = suggestion.value;
+ item.innerHTML = `
+ ${suggestion.icon}
+ ${suggestion.label}
+ `;
+ suggestionsList.appendChild(item);
+ });
+
+ elements.suggestionsDropdown.classList.remove('agent-builder-hidden');
+ }
+
+ function hideSuggestions() {
+ elements.suggestionsDropdown.classList.add('agent-builder-hidden');
+ }
+
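+ // Builds typeahead entries ({ value, label, icon }) for the current wizard step,
+ // filtered against the text the user has typed so far.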
+ function getSuggestionsForCurrentStep(input) {
+ const lowerInput = input.toLowerCase();
+ let suggestions = [];
+
+ switch (conversationState.step) {
+ case 'agent-type':
+ suggestions = Object.entries(CONFIG.agentTypes).map(([key, config]) => ({
+ value: key,
+ label: config.name,
+ icon: key === 'recommendation' ? '🛍️' : '💬'
+ })).filter(s =>
+ s.label.toLowerCase().includes(lowerInput) ||
+ CONFIG.agentTypes[s.value].keywords.some(k => k.includes(lowerInput))
+ );
+ break;
+
+ case 'language':
+ suggestions = Object.entries(CONFIG.languages).map(([key, config]) => ({
+ value: key,
+ label: config.name,
+ icon: '💻'
+ })).filter(s =>
+ s.label.toLowerCase().includes(lowerInput) ||
+ CONFIG.languages[s.value].keywords.some(k => k.includes(lowerInput))
+ );
+ break;
+
+ case 'model':
+ suggestions = Object.entries(CONFIG.models).map(([key, config]) => ({
+ value: key,
+ label: config.name,
+ icon: '🤖'
+ })).filter(s =>
+ s.label.toLowerCase().includes(lowerInput) ||
+ CONFIG.models[s.value].keywords.some(k => k.includes(lowerInput))
+ );
+ break;
+ }
+
+ return suggestions.slice(0, 5); // Limit to 5 suggestions
+ }
+
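+ // The wizard walks through three steps in order: 'agent-type' → 'language' → 'model';
+ // conversationState.step tracks the current one.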
+ function processUserMessage(input) {
+ // Simulate thinking delay
+ setTimeout(() => {
+ const lowerInput = input.toLowerCase();
+
+ switch (conversationState.step) {
+ case 'agent-type':
+ processAgentTypeSelection(lowerInput);
+ break;
+ case 'language':
+ processLanguageSelection(lowerInput);
+ break;
+ case 'model':
+ processModelSelection(lowerInput);
+ break;
+ }
+ }, 500);
+ }
+
+ function processSelection(selection) {
+ conversationState.selections[conversationState.step] = selection;
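+ // Note: the canonical keys (agentType, programmingLanguage, llmModel) are set
+ // inside the process*Selection handlers below; this entry only records the raw choice.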
+
+ switch (conversationState.step) {
+ case 'agent-type':
+ processAgentTypeSelection(selection);
+ break;
+ case 'language':
+ processLanguageSelection(selection);
+ break;
+ case 'model':
+ processModelSelection(selection);
+ break;
+ }
+ }
+
+ function processAgentTypeSelection(input) {
+ let selectedType = null;
+
+ // Check if input matches a type directly
+ if (CONFIG.agentTypes[input]) {
+ selectedType = input;
+ } else {
+ // Search by keywords
+ for (const [key, config] of Object.entries(CONFIG.agentTypes)) {
+ if (config.keywords.some(keyword => input.includes(keyword))) {
+ selectedType = key;
+ break;
+ }
+ }
+ }
+
+ if (selectedType) {
+ conversationState.selections.agentType = selectedType;
+ const config = CONFIG.agentTypes[selectedType];
+
+ // Generate a default agent name based on the type
+ const defaultNames = {
+ recommendation: 'RecommendationEngine',
+ conversational: 'ConversationalAgent'
+ };
+ conversationState.selections.agentName = defaultNames[selectedType] || 'RedisAgent';
+
+ addMessage(`Great! I'll help you build a ${config.name}. ${config.description}`, 'bot');
+
+ // Move to language selection step
+ conversationState.step = 'language';
+ addMessage('Which programming language would you like to use?', 'bot', [
+ { value: 'python', label: '🐍 Python' },
+ { value: 'javascript', label: '🟨 JavaScript' },
+ { value: 'java', label: '☕ Java' },
+ { value: 'csharp', label: '🔷 C#' }
+ ]);
+ } else {
+ addMessage("I didn't understand that. Please choose one of the agent types:", 'bot', [
+ { value: 'recommendation', label: '🛍️ Recommendation Engine' },
+ { value: 'conversational', label: '💬 Conversational Assistant' }
+ ]);
+ }
+ }
+
+ function processLanguageSelection(input) {
+ let selectedLang = null;
+
+ // Handle special responses from coming soon message
+ if (input === 'wait') {
+ addMessage('Thanks for checking out the Redis AI Agent Builder. Come back when your preferred language is supported!', 'bot');
+ return;
+ }
+
+ if (CONFIG.languages[input]) {
+ selectedLang = input;
+ } else {
+ for (const [key, config] of Object.entries(CONFIG.languages)) {
+ if (config.keywords.some(keyword => input.includes(keyword))) {
+ selectedLang = key;
+ break;
+ }
+ }
+ }
+
+ if (selectedLang) {
+ // Check if it's Python (fully supported)
+ if (selectedLang === 'python') {
+ conversationState.selections.programmingLanguage = selectedLang;
+ const config = CONFIG.languages[selectedLang];
+
+ addMessage(`Excellent choice! ${config.description}`, 'bot');
+
+ // Move to next step
+ conversationState.step = 'model';
+ addMessage('Finally, which AI model would you like to use?', 'bot', [
+ { value: 'openai', label: '🤖 OpenAI (GPT-4)' },
+ { value: 'anthropic', label: '🧠 Anthropic (Claude)' },
+ { value: 'llama3', label: '🦙 Llama 3' }
+ ]);
+ } else {
+ // Handle other languages with coming soon message
+ const config = CONFIG.languages[selectedLang];
+ const languageName = config.name;
+
+ addMessage(`${languageName} support is coming soon. Currently, only Python is fully supported.`, 'bot');
+ addMessage(`Would you like to build a Python agent instead?`, 'bot', [
+ { value: 'python', label: 'Yes, use Python' },
+ { value: 'wait', label: 'I\'ll wait for ' + languageName }
+ ]);
+ }
+ } else {
+ addMessage("I didn't recognize that language. Please choose from:", 'bot', [
+ { value: 'python', label: '🐍 Python' },
+ { value: 'javascript', label: '🟨 JavaScript' },
+ { value: 'java', label: '☕ Java' },
+ { value: 'csharp', label: '🔷 C#' }
+ ]);
+ }
+ }
+
+ function processModelSelection(input) {
+ let selectedModel = null;
+
+ if (CONFIG.models[input]) {
+ selectedModel = input;
+ } else {
+ for (const [key, config] of Object.entries(CONFIG.models)) {
+ if (config.keywords.some(keyword => input.includes(keyword))) {
+ selectedModel = key;
+ break;
+ }
+ }
+ }
+
+ if (selectedModel) {
+ conversationState.selections.llmModel = selectedModel;
+ const config = CONFIG.models[selectedModel];
+
+ addMessage(`Perfect! ${config.description}`, 'bot');
+ addMessage('🎉 I have everything I need! Generating your Redis AI agent code...', 'bot');
+
+ // Generate code
+ setTimeout(() => {
+ generateAndDisplayCode();
+ }, 1500);
+ } else {
+ addMessage("I didn't recognize that model. Please choose from:", 'bot', [
+ { value: 'openai', label: '🤖 OpenAI (GPT-4)' },
+ { value: 'anthropic', label: '🧠 Anthropic (Claude)' },
+ { value: 'llama3', label: '🦙 Llama 3' }
+ ]);
+ }
+ }
+
+ function generateAndDisplayCode() {
+ const code = generateAgentCode(conversationState.selections);
+
+ addMessage('✅ Your Redis AI agent code is ready! You can copy or download it below.', 'bot');
+
+ // Show code section
+ displayGeneratedCode(code, conversationState.selections);
+ }
+
+ function generateAgentCode(formData) {
+ // This is a placeholder implementation
+ // In a real implementation, this would call an API or use templates
+ const genericTemplates = {
+ python: getGenericPythonCode,
+ javascript: getGenericJavaScriptCode,
+ java: getGenericJavaCode,
+ csharp: getGenericCSharpCode
+ };
+
+ const fileExtensions = {
+ python: '.py',
+ javascript: '.js',
+ java: '.java',
+ csharp: '.cs'
+ };
+ const base = window.HUGO_BASEURL || '';
+ const filename = `${base}code/agent-templates/${formData.programmingLanguage}/${formData.agentType}_agent${fileExtensions[formData.programmingLanguage]}`;
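+ // e.g. "<base>/code/agent-templates/python/recommendation_agent.py" for a Python recommendation agent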
+
+ return loadTemplateFile(filename, formData) || genericTemplates[formData.programmingLanguage](formData);
+ }
+
+ function loadTemplateFile(filename, formData) {
+ try {
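+ // Synchronous XHR blocks the main thread and is deprecated, but it lets
+ // generateAgentCode() return the template text directly without callbacks.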
+ const xhr = new XMLHttpRequest();
+ xhr.open('GET', filename, false); // Synchronous request
+ xhr.send();
+
+ if (xhr.status === 200) {
+ let templateContent = xhr.responseText;
+
+ // Replace template variables
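+ // Template files are expected to contain these literal placeholder strings
+ // (e.g. "${formData.agentName}"), which are substituted with the user's selections.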
+ templateContent = templateContent.replace(/\$\{formData\.agentName\}/g, formData.agentName);
+ templateContent = templateContent.replace(/\$\{CONFIG\.agentTypes\[formData\.agentType\]\.description\}/g,
+ CONFIG.agentTypes[formData.agentType].description);
+ templateContent = templateContent.replace(/\$\{AgentClassName\}/g, formData.agentName.replace(/\s+/g, ''));
+ templateContent = templateContent.replace(/\$\{formData\.agentType\}/g, formData.agentType);
+ templateContent = templateContent.replace(/\$\{formData\.llmModel\.toUpperCase\(\)\}/g, formData.llmModel.toUpperCase());
+ templateContent = templateContent.replace(/\$\{formData\.llmModel\.toLowerCase\(\)\}/g, formData.llmModel.toLowerCase());
+ templateContent = templateContent.replace(/\$\{CONFIG\.models\[formData\.llmModel\]\.defaultModel\}/g,
+ CONFIG.models[formData.llmModel].defaultModel);
+ templateContent = templateContent.replace(/\$\{CONFIG\.models\[formData\.llmModel\]\.baseUrl\}/g,
+ CONFIG.models[formData.llmModel].baseUrl);
+
+ return templateContent;
+ } else {
+ console.log(`Template file not found: ${filename} (${xhr.status})`);
+ }
+ } catch (error) {
+ console.log(`Could not load template file ${filename}:`, error);
+ }
+
+ return null; // Return null if file loading failed
+ }
+
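+ // The getGeneric*Code functions below are inline fallback templates, used only
+ // when the corresponding template file cannot be fetched from the site.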
+ function getGenericPythonCode(formData) {
+ return `# ${formData.agentName} - Redis AI Agent
+# Generated for ${CONFIG.agentTypes[formData.agentType].description}
+
+import redis
+import os
+from typing import List, Dict, Any
+
+class ${formData.agentName.replace(/\s+/g, '')}:
+ def __init__(self):
+ self.redis_client = redis.Redis(
+ host=os.getenv('REDIS_HOST', 'localhost'),
+ port=int(os.getenv('REDIS_PORT', 6379)),
+ decode_responses=True
+ )
+ self.llm_api_key = os.getenv('${formData.llmModel.toUpperCase()}_API_KEY')
+
+ def process_query(self, query: str) -> Dict[str, Any]:
+ """Process user query and return response"""
+ # TODO: Implement ${formData.agentType} logic
+ pass
+
+ def store_data(self, key: str, data: Dict[str, Any]) -> bool:
+ """Store data in Redis"""
+ try:
+ self.redis_client.hset(key, mapping=data)
+ return True
+ except Exception as e:
+ print(f"Error storing data: {e}")
+ return False
+
+# Example usage
+if __name__ == "__main__":
+ agent = ${formData.agentName.replace(/\s+/g, '')}()
+ # Add your implementation here`;
+ }
+
+ function getGenericJavaScriptCode(formData) {
+ return `// ${formData.agentName} - Redis AI Agent
+// Generated for ${CONFIG.agentTypes[formData.agentType].description}
+
+const redis = require('redis');
+require('dotenv').config();
+
+class ${formData.agentName.replace(/\s+/g, '')} {
+ constructor() {
+ // node-redis v4 takes connection options under `socket`; call connect() before issuing commands
+ this.redisClient = redis.createClient({
+ socket: {
+ host: process.env.REDIS_HOST || 'localhost',
+ port: Number(process.env.REDIS_PORT) || 6379
+ }
+ });
+ this.llmApiKey = process.env.${formData.llmModel.toUpperCase()}_API_KEY;
+ }
+
+ async processQuery(query) {
+ // TODO: Implement ${formData.agentType} logic
+ try {
+ // Your implementation here
+ return { success: true, response: "Placeholder response" };
+ } catch (error) {
+ console.error('Error processing query:', error);
+ return { success: false, error: error.message };
+ }
+ }
+
+ async storeData(key, data) {
+ try {
+ await this.redisClient.hSet(key, data);
+ return true;
+ } catch (error) {
+ console.error('Error storing data:', error);
+ return false;
+ }
+ }
+}
+
+// Example usage
+const agent = new ${formData.agentName.replace(/\s+/g, '')}();
+module.exports = ${formData.agentName.replace(/\s+/g, '')};`;
+ }
+
+ function getGenericJavaCode(formData) {
+ return `// ${formData.agentName} - Redis AI Agent
+// Generated for ${CONFIG.agentTypes[formData.agentType].description}
+
+import redis.clients.jedis.Jedis;
+import java.util.Map;
+import java.util.HashMap;
+
+public class ${formData.agentName.replace(/\s+/g, '')} {
+ private Jedis jedis;
+ private String llmApiKey;
+
+ public ${formData.agentName.replace(/\s+/g, '')}() {
+ this.jedis = new Jedis(
+ System.getenv().getOrDefault("REDIS_HOST", "localhost"),
+ Integer.parseInt(System.getenv().getOrDefault("REDIS_PORT", "6379"))
+ );
+ this.llmApiKey = System.getenv("${formData.llmModel.toUpperCase()}_API_KEY");
+ }
+
+ public Map<String, Object> processQuery(String query) {
+ // TODO: Implement ${formData.agentType} logic
+ Map<String, Object> response = new HashMap<>();
+ response.put("success", true);
+ response.put("response", "Placeholder response");
+ return response;
+ }
+
+ public boolean storeData(String key, Map<String, String> data) {
+ try {
+ jedis.hset(key, data);
+ return true;
+ } catch (Exception e) {
+ System.err.println("Error storing data: " + e.getMessage());
+ return false;
+ }
+ }
+}`;
+ }
+
+ function getGenericCSharpCode(formData) {
+ return `// ${formData.agentName} - Redis AI Agent
+// Generated for ${CONFIG.agentTypes[formData.agentType].description}
+
+using StackExchange.Redis;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+
+public class ${formData.agentName.replace(/\s+/g, '')}
+{
+ private readonly IDatabase _database;
+ private readonly string _llmApiKey;
+
+ public ${formData.agentName.replace(/\s+/g, '')}()
+ {
+ var connection = ConnectionMultiplexer.Connect(
+ Environment.GetEnvironmentVariable("REDIS_CONNECTION_STRING") ?? "localhost:6379"
+ );
+ _database = connection.GetDatabase();
+ _llmApiKey = Environment.GetEnvironmentVariable("${formData.llmModel.toUpperCase()}_API_KEY");
+ }
+
+ public async Task<Dictionary<string, object>> ProcessQueryAsync(string query)
+ {
+ // TODO: Implement ${formData.agentType} logic
+ return new Dictionary<string, object>
+ {
+ ["success"] = true,
+ ["response"] = "Placeholder response"
+ };
+ }
+
+ public async Task<bool> StoreDataAsync(string key, Dictionary<string, string> data)
+ {
+ try
+ {
+ var hash = data.Select(kvp => new HashEntry(kvp.Key, kvp.Value)).ToArray();
+ await _database.HashSetAsync(key, hash);
+ return true;
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine($"Error storing data: {ex.Message}");
+ return false;
+ }
+ }
+}`;
+ }
+
+ function displayGeneratedCode(code, formData) {
+ // Use highlight.js when it's available; otherwise fall back to plain text
+ if (window.hljs) {
+ elements.generatedCode.innerHTML = hljs.highlight(
+ code, { language: formData.programmingLanguage }
+ ).value;
+ } else {
+ elements.generatedCode.textContent = code;
+ }
+ elements.codeSection.classList.remove('agent-builder-hidden');
+
+ // Make the initial wizard inactive now that code is generated
+ elements.chatContainer.classList.add('agent-builder-inactive');
+
+ // Store code for download
+ elements.codeSection.dataset.code = code;
+ elements.codeSection.dataset.filename = getFilename(formData);
+
+ // Handle Jupyter button state based on selected model
+ const tryJupyterBtn = document.getElementById('try-jupyter-btn');
+ if (tryJupyterBtn) {
+ if (formData.llmModel !== 'openai') {
+ // Disable and grey out the button for non-OpenAI models
+ tryJupyterBtn.disabled = true;
+ tryJupyterBtn.style.backgroundColor = '#B8B8B8';
+ tryJupyterBtn.style.color = '#4B4F58';
+ tryJupyterBtn.style.borderColor = '#B8B8B8';
+ tryJupyterBtn.style.cursor = 'not-allowed';
+ tryJupyterBtn.style.opacity = '1';
+ tryJupyterBtn.title = 'Coming soon';
+ } else {
+ // Enable the button for OpenAI models
+ tryJupyterBtn.disabled = false;
+ tryJupyterBtn.style.backgroundColor = '';
+ tryJupyterBtn.style.color = '';
+ tryJupyterBtn.style.borderColor = '';
+ tryJupyterBtn.style.cursor = 'pointer';
+ tryJupyterBtn.style.opacity = '1';
+ tryJupyterBtn.title = 'Try your agent in a Jupyter notebook';
+ }
+ }
+
+ // Attach event listeners to code action buttons now that they're visible
+ attachCodeActionListeners();
+
+ // Scroll to code section
+ elements.codeSection.scrollIntoView({ behavior: 'smooth' });
+ }
+
+ function attachCodeActionListeners() {
+ // Get the button elements now that the code section is visible
+ const copyBtn = document.getElementById('copy-code-btn');
+ const downloadBtn = document.getElementById('download-code-btn');
+ const tryJupyterBtn = document.getElementById('try-jupyter-btn');
+
+ // Attach listeners only if they haven't been attached yet
+ if (copyBtn && !copyBtn.hasAttribute('data-listener-attached')) {
+ copyBtn.addEventListener('click', copyCode);
+ copyBtn.setAttribute('data-listener-attached', 'true');
+ }
+
+ if (downloadBtn && !downloadBtn.hasAttribute('data-listener-attached')) {
+ downloadBtn.addEventListener('click', downloadCode);
+ downloadBtn.setAttribute('data-listener-attached', 'true');
+ }
+
+ if (tryJupyterBtn && !tryJupyterBtn.hasAttribute('data-listener-attached')) {
+ tryJupyterBtn.addEventListener('click', function(event) {
+ event.preventDefault();
+ tryInJupyter(event);
+ });
+ tryJupyterBtn.setAttribute('data-listener-attached', 'true');
+ }
+
+ // Add start again button listener
+ const startAgainBtn = document.getElementById('start-again-btn');
+ if (startAgainBtn && !startAgainBtn.hasAttribute('data-listener-attached')) {
+ startAgainBtn.addEventListener('click', function(event) {
+ event.preventDefault();
+ resetWizard();
+ });
+ startAgainBtn.setAttribute('data-listener-attached', 'true');
+ }
+ }
+
+ function getFilename(formData) {
+ const extensions = {
+ python: '.py',
+ javascript: '.js',
+ java: '.java',
+ csharp: '.cs'
+ };
+
+ const cleanName = formData.agentName.replace(/\s+/g, '').toLowerCase();
+ return `${cleanName}_agent${extensions[formData.programmingLanguage]}`;
+ }
+
+ function copyCode() {
+ // Get the raw code from the dataset (not the highlighted version)
+ const code = elements.codeSection.dataset.code;
+ const copyBtn = document.getElementById('copy-code-btn');
+
+ if (!code) {
+ alert('No code available to copy');
+ return;
+ }
+
+ // Try to copy to clipboard
+ if (navigator.clipboard && navigator.clipboard.writeText) {
+ navigator.clipboard.writeText(code).then(() => {
+ showCopyFeedback(copyBtn, true);
+ }).catch(err => {
+ console.error('Failed to copy to clipboard:', err);
+ fallbackCopyToClipboard(code, copyBtn);
+ });
+ } else {
+ // Fallback for older browsers
+ fallbackCopyToClipboard(code, copyBtn);
+ }
+ }
+
+ function showCopyFeedback(button, success) {
+ if (!button) return;
+
+ const originalHTML = button.innerHTML;
+
+ if (success) {
+ button.innerHTML = `
+
+ Copied!
+ `;
+ } else {
+ button.innerHTML = `
+
+ Failed
+ `;
+ }
+
+ setTimeout(() => {
+ button.innerHTML = originalHTML;
+ }, 2000);
+ }
+
+ function fallbackCopyToClipboard(text, button) {
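+ // Legacy path: document.execCommand('copy') is deprecated but still works in
+ // browsers that lack the asynchronous Clipboard API.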
+ // Create a temporary textarea element
+ const textArea = document.createElement('textarea');
+ textArea.value = text;
+ textArea.style.position = 'fixed';
+ textArea.style.left = '-999999px';
+ textArea.style.top = '-999999px';
+ document.body.appendChild(textArea);
+
+ try {
+ textArea.focus();
+ textArea.select();
+ const successful = document.execCommand('copy');
+ showCopyFeedback(button, successful);
+ } catch (err) {
+ console.error('Fallback copy failed:', err);
+ showCopyFeedback(button, false);
+ } finally {
+ document.body.removeChild(textArea);
+ }
+ }
+
+ function downloadCode() {
+ const code = elements.codeSection.dataset.code;
+ const filename = elements.codeSection.dataset.filename;
+
+ const blob = new Blob([code], { type: 'text/plain' });
+ const url = URL.createObjectURL(blob);
+
+ const a = document.createElement('a');
+ a.href = url;
+ a.download = filename;
+ document.body.appendChild(a);
+ a.click();
+ document.body.removeChild(a);
+ URL.revokeObjectURL(url);
+ }
+
+ function tryInJupyter(event) {
+ // Prevent any default behavior that might cause page reload
+ if (event) {
+ event.preventDefault();
+ event.stopPropagation();
+ }
+
+ // Check if the button is disabled (for non-OpenAI models)
+ const tryJupyterBtn = document.getElementById('try-jupyter-btn');
+ if (tryJupyterBtn && tryJupyterBtn.disabled) {
+ return false;
+ }
+
+ // Get the current agent configuration
+ const formData = conversationState.selections;
+
+ // Check if we have a specific Binder link for this configuration
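+ // Binder notebooks currently exist only for the Python + OpenAI combinations handled below.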
+ if (formData.programmingLanguage === 'python' && formData.llmModel === 'openai') {
+ let binderUrl = null;
+
+ if (formData.agentType === 'recommendation') {
+ binderUrl = 'https://staging.learn.redis.com/binder/v2/gh/redis/binder-launchers/agent_recommendation_openai?urlpath=%2Fdoc%2Ftree%2Fdemo.ipynb';
+ } else if (formData.agentType === 'conversational') {
+ binderUrl = 'https://staging.learn.redis.com/binder/v2/gh/redis/binder-launchers/agent_conversational_openai?urlpath=%2Fdoc%2Ftree%2Fdemo.ipynb';
+ }
+
+ if (binderUrl) {
+ // Open the Binder notebook in a new tab
+ window.open(binderUrl, '_blank');
+ return false;
+ }
+ }
+
+ // Fallback to placeholder alert for other configurations
+ alert('Jupyter notebook integration coming soon!');
+
+ // Ensure we don't navigate away
+ return false;
+ }
+
+ // Code Chat functions (feature removed; openCodeChat kept below for reference)
+ /* function openCodeChat() {
+ const code = elements.codeSection.dataset.code;
+ if (!code) {
+ alert('No code available to chat about');
+ return;
+ }
+
+ // Show the chat section (wizard is already inactive from code generation)
+ elements.codeChatSection.classList.remove('agent-builder-hidden');
+
+ // Clear previous messages and reset state
+ elements.codeChatMessages.innerHTML = '';
+ codeChatState.conversationHistory = [];
+
+ // Check if we need to ask for API key
+ if (!codeChatState.apiKey && !codeChatState.hasAskedForKey) {
+ addCodeChatMessage('Hi! I can help you understand and work with your generated code.', 'bot');
+ addCodeChatMessage('To provide you with the best assistance, please enter your OpenAI API key. This will enable me to give you detailed, context-aware help about your code.', 'bot');
+ addCodeChatMessage('You can get an OpenAI API key from: https://platform.openai.com/api-keys', 'bot');
+ addCodeChatMessage('Please enter your API key (starts with "sk-"):', 'bot');
+ codeChatState.hasAskedForKey = true;
+ } else if (codeChatState.apiKey) {
+ addCodeChatMessage('Hi! I can help you understand and work with your generated code. What would you like to know?', 'bot');
+ } else {
+ addCodeChatMessage('Please enter your OpenAI API key to continue:', 'bot');
+ }
+
+ // Set up event listeners
+ setupCodeChatListeners();
+
+ // Focus on input
+ elements.codeChatInput.focus();
+
+ // Scroll to the chat section
+ elements.codeChatSection.scrollIntoView({ behavior: 'smooth' });
+ }
+
+ */
+
+ function closeCodeChat() {
+ if (elements.codeChatSection) {
+ elements.codeChatSection.classList.add('agent-builder-hidden');
+ }
+ if (elements.codeChatInput) {
+ elements.codeChatInput.value = '';
+ }
+ if (elements.codeChatSend) {
+ elements.codeChatSend.disabled = true;
+ }
+
+ // Don't reactivate the wizard - it should remain inactive until "Start again" is clicked
+ // The wizard was made inactive when code was generated, not when chat was opened
+
+ // Remove event listeners to prevent memory leaks
+ removeCodeChatListeners();
+ }
+
+ function resetWizard() {
+ // Hide code section
+ elements.codeSection.classList.add('agent-builder-hidden');
+
+ // Hide code chat section
+ if (elements.codeChatSection) {
+ elements.codeChatSection.classList.add('agent-builder-hidden');
+ }
+
+ // Reactivate the initial wizard
+ elements.chatContainer.classList.remove('agent-builder-inactive');
+
+ // Reset conversation state
+ conversationState = {
+ step: 'agent-type',
+ selections: {}
+ };
+
+ // Clear chat messages except the initial one
+ const initialMessage = elements.chatMessages.querySelector('.chat-message.bot-message');
+ elements.chatMessages.innerHTML = '';
+ if (initialMessage) {
+ elements.chatMessages.appendChild(initialMessage.cloneNode(true));
+ }
+
+ // Reset input
+ elements.chatInput.value = '';
+ elements.sendButton.disabled = true;
+
+ // Hide suggestions
+ hideSuggestions();
+
+ // Clear code chat state
+ codeChatState.conversationHistory = [];
+ codeChatState.hasAskedForKey = false;
+
+ // Reset Jupyter button state
+ const tryJupyterBtn = document.getElementById('try-jupyter-btn');
+ if (tryJupyterBtn) {
+ tryJupyterBtn.disabled = false;
+ tryJupyterBtn.style.backgroundColor = '';
+ tryJupyterBtn.style.color = '';
+ tryJupyterBtn.style.borderColor = '';
+ tryJupyterBtn.style.opacity = '1';
+ tryJupyterBtn.style.cursor = 'pointer';
+ tryJupyterBtn.title = 'Try your agent in a Jupyter notebook';
+ }
+
+ // Focus on input
+ elements.chatInput.focus();
+ }
+
+ /* Code Chat feature removed: listener setup is a no-op, but the handlers below are kept for reference */
+ function setupCodeChatListeners() { /* no-op */ }
+
+ function removeCodeChatListeners() { /* no-op */ }
+
+ function handleCodeChatInputChange() {
+ const input = elements.codeChatInput.value.trim();
+ elements.codeChatSend.disabled = input.length === 0;
+ }
+
+ function handleCodeChatKeyDown(e) {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault();
+ handleCodeChatSend();
+ }
+ }
+
+ function handleCodeChatSend() {
+ const input = elements.codeChatInput.value.trim();
+ if (!input) return;
+
+ // Special handling for API key input (don't show it in chat)
+ if (!codeChatState.apiKey && input.startsWith('sk-')) {
+ // Add a masked version to chat
+ addCodeChatMessage('OpenAI API key: ' + '*'.repeat(Math.min(input.length, 20)), 'user');
+ } else {
+ // Add user message normally
+ addCodeChatMessage(input, 'user');
+ }
+
+ // Clear input
+ elements.codeChatInput.value = '';
+ elements.codeChatSend.disabled = true;
+
+ // Process the message
+ processCodeChatMessage(input);
+ }
+
+ function addCodeChatMessage(content, type) {
+ const messageDiv = document.createElement('div');
+ messageDiv.className = `chat-message ${type}-message`;
+
+ const avatar = document.createElement('div');
+ avatar.className = 'message-avatar';
+
+ if (type === 'bot') {
+ avatar.innerHTML = `
+
+ `;
+ } else {
+ avatar.innerHTML = `
+
+ `;
+ }
+
+ const messageContent = document.createElement('div');
+ messageContent.className = 'message-content';
+
+ // Render Markdown for bot messages, plain text for user messages
+ if (type === 'bot' && window.marked) {
+ try {
+ // Configure marked: GitHub-flavored Markdown with line breaks preserved
+ marked.setOptions({
+ breaks: true,
+ gfm: true,
+ sanitize: false,
+ highlight: function(code, lang) {
+ // Use highlight.js if available
+ if (window.hljs && lang) {
+ try {
+ return hljs.highlight(code, { language: lang }).value;
+ } catch (e) {
+ return hljs.highlightAuto(code).value;
+ }
+ }
+ return code;
+ }
+ });
+ messageContent.innerHTML = marked.parse(content);
+ } catch (error) {
+ console.warn('Markdown parsing failed, falling back to plain text:', error);
+ messageContent.innerHTML = `<p>${content}</p>`;
+ }
+ } else {
+ // Fallback to plain text wrapped in paragraph
+ messageContent.innerHTML = `<p>${content}</p>`;
+ }
+
+ messageDiv.appendChild(avatar);
+ messageDiv.appendChild(messageContent);
+ elements.codeChatMessages.appendChild(messageDiv);
+
+ // Scroll to bottom
+ elements.codeChatMessages.scrollTop = elements.codeChatMessages.scrollHeight;
+ }
+
+ function processCodeChatMessage(input) {
+ // Check for special commands
+ if (input.toLowerCase() === '/reset-key' || input.toLowerCase() === '/new-key') {
+ codeChatState.apiKey = null;
+ codeChatState.hasAskedForKey = false;
+ addCodeChatMessage('🔄 API key reset. Please enter your new OpenAI API key:', 'bot');
+ return;
+ }
+
+ // Check if this is an API key input
+ if (!codeChatState.apiKey && input.startsWith('sk-')) {
+ codeChatState.apiKey = input.trim();
+ addCodeChatMessage('✅ OpenAI API key saved! Now I can provide you with intelligent assistance about your code. What would you like to know?', 'bot');
+ addCodeChatMessage('💡 Tip: Type "/reset-key" if you need to change your API key later.', 'bot');
+ return;
+ }
+
+ // If no API key, remind user
+ if (!codeChatState.apiKey) {
+ addCodeChatMessage('Please enter your OpenAI API key first to enable intelligent responses.', 'bot');
+ return;
+ }
+
+ // Add user message to conversation history
+ codeChatState.conversationHistory.push({
+ role: 'user',
+ content: input
+ });
+
+ // Show thinking indicator
+ addCodeChatMessage('🤔 Thinking...', 'bot');
+
+ // Make LLM call
+ callLiteLLM(input);
+ }
+
+ async function callLiteLLM(userMessage) {
+ // Hoisted out of the try block so the catch fallback can also use them
+ const code = elements.codeSection.dataset.code;
+ const formData = conversationState.selections;
+
+ try {
+
+ // Prepare the system message with code context
+ const systemMessage = `You are a helpful AI assistant specializing in Redis and AI development.
+
+The user has generated the following ${formData.programmingLanguage} code for a ${CONFIG.agentTypes[formData.agentType].name} using ${formData.llmModel}:
+
+\`\`\`${formData.programmingLanguage}
+${code}
+\`\`\`
+
+Please help the user understand, modify, debug, or deploy this code. Provide specific, actionable advice based on the actual code shown above.`;
+
+ // Prepare messages for the API call. The latest user message is already in
+ // conversationHistory (pushed in processCodeChatMessage), so it isn't appended again.
+ const messages = [
+ { role: 'system', content: systemMessage },
+ ...codeChatState.conversationHistory.slice(-10) // Keep last 10 messages for context
+ ];
+
+ // Make the API call using fetch with custom base URL
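+ // The custom base URL fronts an OpenAI-compatible /v1/chat/completions endpoint
+ // (a LiteLLM-style proxy), so the request body follows the standard chat-completions schema.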
+ const response = await fetch('https://d34j1iks5zrrtk.cloudfront.net/v1/chat/completions', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${codeChatState.apiKey}`
+ },
+ body: JSON.stringify({
+ model: 'gpt-3.5-turbo',
+ messages: messages,
+ max_tokens: 1000,
+ temperature: 0.7
+ })
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text();
+ console.error('API Response:', response.status, response.statusText, errorText);
+ throw new Error(`API call failed: ${response.status} ${response.statusText} - ${errorText}`);
+ }
+
+ const data = await response.json();
+ const assistantMessage = data.choices[0].message.content;
+
+ // Remove thinking indicator and add real response
+ removeLastBotMessage();
+ addCodeChatMessage(assistantMessage, 'bot');
+
+ // Add to conversation history
+ codeChatState.conversationHistory.push({
+ role: 'assistant',
+ content: assistantMessage
+ });
+
+ } catch (error) {
+ console.error('OpenAI API error:', error);
+
+ // Remove thinking indicator and show error
+ removeLastBotMessage();
+
+ // Handle different types of API errors
+ if (error.message.includes('401') || error.message.includes('Incorrect API key')) {
+ addCodeChatMessage('❌ Invalid OpenAI API key. Please check your API key and try again.', 'bot');
+ addCodeChatMessage('Make sure your API key starts with "sk-" and has sufficient credits.', 'bot');
+ codeChatState.apiKey = null; // Reset API key
+ } else if (error.message.includes('429') || error.message.includes('rate limit')) {
+ addCodeChatMessage('⚠️ Rate limit exceeded. Please wait a moment and try again.', 'bot');
+ } else if (error.message.includes('403') || error.message.includes('insufficient_quota') || error.message.includes('quota')) {
+ addCodeChatMessage('❌ Your OpenAI account has insufficient credits. Please add credits to your account.', 'bot');
+ } else {
+ addCodeChatMessage('❌ Sorry, I encountered an error connecting to the AI service. Please try again.', 'bot');
+ console.log('Full error details:', error);
+ }
+
+ // Fallback to basic response
+ const fallbackResponse = generateCodeChatResponse(userMessage, code, formData);
+ addCodeChatMessage('Here\'s some basic help while the AI service is unavailable:', 'bot');
+ addCodeChatMessage(fallbackResponse, 'bot');
+ }
+ }
+
+ function removeLastBotMessage() {
+ const messages = elements.codeChatMessages.children;
+ if (messages.length > 0) {
+ const lastMessage = messages[messages.length - 1];
+ if (lastMessage.classList.contains('bot-message')) {
+ lastMessage.remove();
+ }
+ }
+ }
+
+ function generateCodeChatResponse(input, code, formData) {
+ const lowerInput = input.toLowerCase();
+
+ // Simple keyword-based responses for demonstration
+ if (lowerInput.includes('how') && (lowerInput.includes('run') || lowerInput.includes('execute'))) {
+ return `To run this ${formData.programmingLanguage} code:
+
+1. Save the code to a file with the appropriate extension
+2. Install the required dependencies (Redis client library and ${formData.llmModel} SDK)
+3. Set up your environment variables for Redis connection and API keys
+4. Run the file using your ${formData.programmingLanguage} interpreter
+
+Would you like specific installation commands for the dependencies?`;
+ }
+
+ if (lowerInput.includes('dependencies') || lowerInput.includes('install') || lowerInput.includes('requirements')) {
+ const deps = getDependenciesForLanguage(formData.programmingLanguage, formData.llmModel);
+ return `Here are the dependencies you need to install:
+
+${deps}
+
+Make sure you have ${formData.programmingLanguage} installed on your system first.`;
+ }
+
+ if (lowerInput.includes('redis') && (lowerInput.includes('connect') || lowerInput.includes('setup'))) {
+ return `To set up Redis for this agent:
+
+1. Install Redis locally or use Redis Cloud
+2. Set these environment variables:
+ - REDIS_HOST (default: localhost)
+ - REDIS_PORT (default: 6379)
+ - REDIS_PASSWORD (if required)
+
+For Redis Cloud, you can get a free database at https://redis.io/try-free/`;
+ }
+
+ if (lowerInput.includes('api') && lowerInput.includes('key')) {
+ return `You'll need to set up API keys for ${formData.llmModel}:
+
+Set the environment variable: ${formData.llmModel.toUpperCase()}_API_KEY
+
+Get your API key from:
+- OpenAI: https://platform.openai.com/api-keys
+- Anthropic: https://console.anthropic.com/
+- For Llama 3: You can run it locally or use a hosted service
+
+Never hardcode API keys in your source code!`;
+ }
+
+ if (lowerInput.includes('explain') || lowerInput.includes('what does')) {
+ return `This code creates a ${CONFIG.agentTypes[formData.agentType].name} that:
+
+${CONFIG.agentTypes[formData.agentType].features.map(feature => `• ${feature}`).join('\n')}
+
+The main components are:
+• Redis client for data storage and retrieval
+• ${formData.llmModel} integration for AI responses
+• Error handling and logging
+• Data persistence methods
+
+What specific part would you like me to explain in more detail?`;
+ }
+
+ if (lowerInput.includes('modify') || lowerInput.includes('customize') || lowerInput.includes('change')) {
+ return `You can customize this code by:
+
+• Modifying the system prompt in the LLM calls
+• Adding custom data processing logic
+• Implementing additional Redis data structures
+• Adding authentication and security features
+• Scaling with connection pooling
+
+What specific modifications are you thinking about?`;
+ }
+
+ if (lowerInput.includes('error') || lowerInput.includes('debug') || lowerInput.includes('troubleshoot')) {
+ return `Common issues and solutions:
+
+• **Connection errors**: Check Redis is running and credentials are correct
+• **API errors**: Verify your ${formData.llmModel} API key is valid and has credits
+• **Import errors**: Make sure all dependencies are installed
+• **Permission errors**: Check file permissions and environment variables
+
+What specific error are you encountering?`;
+ }
+
+ // Default response
+ return `I can help you with questions about:
+
+• How to run and deploy this code
+• Setting up dependencies and environment
+• Connecting to Redis and configuring APIs
+• Customizing the agent for your needs
+• Troubleshooting common issues
+• Explaining how different parts work
+
+What would you like to know more about?`;
+ }
+
+ function getDependenciesForLanguage(language, llmModel) {
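+ // These install commands are starting points; check each SDK's documentation
+ // for current package names and versions.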
+ const deps = {
+ python: {
+ base: "pip install redis openai python-dotenv",
+ openai: "pip install redis openai python-dotenv",
+ anthropic: "pip install redis anthropic python-dotenv",
+ llama2: "pip install redis transformers torch python-dotenv"
+ },
+ javascript: {
+ base: "npm install redis dotenv",
+ openai: "npm install redis openai dotenv",
+ anthropic: "npm install redis @anthropic-ai/sdk dotenv",
+ llama2: "npm install redis @huggingface/inference dotenv"
+ },
+ java: {
+ base: "Add to pom.xml: jedis, okhttp3",
+ openai: "Add to pom.xml: jedis, okhttp3 (for OpenAI API)",
+ anthropic: "Add to pom.xml: jedis, okhttp3 (for Anthropic API)",
+ llama2: "Add to pom.xml: jedis, okhttp3 (for Llama API)"
+ },
+ csharp: {
+ base: "dotnet add package StackExchange.Redis",
+ openai: "dotnet add package StackExchange.Redis OpenAI",
+ anthropic: "dotnet add package StackExchange.Redis Anthropic.SDK",
+ llama2: "dotnet add package StackExchange.Redis (+ Llama API client)"
+ }
+ };
+
+ return deps[language]?.[llmModel] || deps[language]?.base || "Dependencies information not available";
+ }
+
+ // Initialize when script loads
+ init();
+
+})();