diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml new file mode 100644 index 00000000..8cd8b9b0 --- /dev/null +++ b/.github/workflows/ci-cd.yml @@ -0,0 +1,327 @@ +# AI Shopping Concierge - CI/CD Pipeline Configuration + +name: AI Shopping Concierge CI/CD + +on: + push: + branches: [ main, develop ] + pull_request: + branches: [ main ] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + # Test and Quality Assurance + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.10', '3.11'] + + services: + postgres: + image: postgres:15 + env: + POSTGRES_PASSWORD: postgres + POSTGRES_DB: test_db + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + + redis: + image: redis:7 + options: >- + --health-cmd "redis-cli ping" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 6379:6379 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Cache pip dependencies + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pytest pytest-asyncio pytest-cov black isort mypy safety bandit + + - name: Code formatting check + run: | + black --check ai_shopping_agent/ + isort --check-only ai_shopping_agent/ + + - name: Type checking + run: | + mypy ai_shopping_agent/ --ignore-missing-imports + + - name: Security scan + run: | + safety check --json + bandit -r ai_shopping_agent/ -f json + + - name: Run tests + env: + DATABASE_URL: postgresql+asyncpg://postgres:postgres@localhost:5432/test_db + REDIS_URL: redis://localhost:6379/0 + GOOGLE_AI_API_KEY: test_key + WHATSAPP_ACCESS_TOKEN: test_token + run: | + pytest tests/ -v --cov=ai_shopping_agent --cov-report=xml --cov-report=html + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + file: ./coverage.xml + flags: unittests + name: codecov-umbrella + + # Build Docker image + build: + needs: test + runs-on: ubuntu-latest + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop') + + outputs: + image-tag: ${{ steps.meta.outputs.tags }} + image-digest: ${{ steps.build.outputs.digest }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,prefix=main-{{branch}}-,enable=${{ github.ref == 'refs/heads/main' }} + type=sha,prefix=develop-{{branch}}-,enable=${{ github.ref == 'refs/heads/develop' }} + + - name: Build and push Docker image + id: build + uses: docker/build-push-action@v5 + with: + context: . + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + platforms: linux/amd64,linux/arm64 + + # Deploy to staging + deploy-staging: + needs: [test, build] + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/develop' + environment: staging + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up kubectl + uses: azure/setup-kubectl@v3 + with: + version: 'v1.28.0' + + - name: Configure kubectl + run: | + echo "${{ secrets.KUBE_CONFIG_STAGING }}" | base64 -d > $HOME/.kube/config + + - name: Deploy to staging + run: | + # Update image tag in Kubernetes manifests + sed -i "s|image: .*|image: ${{ needs.build.outputs.image-tag }}|g" deployment/kubernetes/staging/deployment.yaml + + # Apply Kubernetes manifests + kubectl apply -f deployment/kubernetes/staging/ + + # Wait for rollout + kubectl rollout status deployment/ai-shopping-concierge -n staging --timeout=300s + + - name: Run smoke tests + run: | + # Wait for service to be ready + sleep 30 + + # Get service URL + STAGING_URL=$(kubectl get service ai-shopping-concierge-service -n staging -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + + # Run smoke tests + curl -f http://$STAGING_URL:8000/health || exit 1 + + # Test webhook endpoint + curl -f -X GET "http://$STAGING_URL:8000/webhook/whatsapp?hub.mode=subscribe&hub.challenge=test&hub.verify_token=test" || exit 1 + + - name: Notify Slack + if: always() + uses: 8398a7/action-slack@v3 + with: + status: ${{ job.status }} + channel: '#deployments' + text: 'Staging deployment ${{ job.status }}' + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + + # Deploy to production + deploy-production: + needs: [test, build, deploy-staging] + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/main' + environment: production + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up kubectl + uses: azure/setup-kubectl@v3 + with: + version: 'v1.28.0' + + - name: Configure kubectl + run: | + echo "${{ secrets.KUBE_CONFIG_PRODUCTION }}" | base64 -d > $HOME/.kube/config + + - name: Deploy to production + run: | + # Update image tag in Kubernetes manifests + sed -i "s|image: .*|image: ${{ needs.build.outputs.image-tag }}|g" deployment/kubernetes/production/deployment.yaml + + # Apply Kubernetes manifests + kubectl apply -f deployment/kubernetes/production/ + + # Wait for rollout + kubectl rollout status deployment/ai-shopping-concierge -n production --timeout=600s + + - name: Run production health checks + run: | + # Wait for service to be ready + sleep 60 + + # Get production URL + PROD_URL="https://api.ai-shopping-concierge.com" + + # Run comprehensive health checks + curl -f $PROD_URL/health || exit 1 + + # Test critical endpoints + curl -f -H "Authorization: Bearer ${{ secrets.HEALTH_CHECK_TOKEN }}" $PROD_URL/api/v1/health/deep || exit 1 + + - name: Create GitHub release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: v${{ github.run_number }} + release_name: Release v${{ github.run_number }} + body: | + ## Changes in this release + - Automated deployment from commit ${{ github.sha }} + - Image: ${{ needs.build.outputs.image-tag }} + - Deployed to production: $(date) + draft: false + prerelease: false + + - name: Notify Slack + if: always() + uses: 8398a7/action-slack@v3 + with: + status: ${{ job.status }} + channel: '#deployments' + text: 'Production deployment ${{ job.status }} - v${{ github.run_number }}' + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + + # Security scanning + security-scan: + needs: build + runs-on: ubuntu-latest + if: github.event_name == 'push' + + steps: + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@master + with: + image-ref: ${{ needs.build.outputs.image-tag }} + format: 'sarif' + output: 'trivy-results.sarif' + + - name: Upload Trivy scan results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v2 + if: always() + with: + sarif_file: 'trivy-results.sarif' + + # Performance testing + performance-test: + needs: deploy-staging + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/develop' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + + - name: Install k6 + run: | + sudo gpg -k + sudo gpg --no-default-keyring --keyring /usr/share/keyrings/k6-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 + echo "deb [signed-by=/usr/share/keyrings/k6-archive-keyring.gpg] https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list + sudo apt-get update + sudo apt-get install k6 + + - name: Run performance tests + run: | + k6 run tests/performance/load-test.js --env STAGING_URL=${{ secrets.STAGING_URL }} + + - name: Upload performance results + uses: actions/upload-artifact@v3 + with: + name: performance-results + path: performance-results.json \ No newline at end of file diff --git a/COMPLETE_SETUP_GUIDE.md b/COMPLETE_SETUP_GUIDE.md new file mode 100644 index 00000000..2ca894d9 --- /dev/null +++ b/COMPLETE_SETUP_GUIDE.md @@ -0,0 +1,353 @@ +# AI Shopping Concierge - Complete Setup and Deployment Guide + +## ๐Ÿš€ Quick Start + +The AI Shopping Concierge is built on top of the AP2 protocol, providing intelligent shopping assistance with WhatsApp/web chat, AI curation, negotiation, and seamless checkout. + +## ๐Ÿ“ Project Structure + +``` +AP2/ +โ”œโ”€โ”€ product-layer/ # Your product innovations +โ”‚ โ””โ”€โ”€ ai-shopping-agent/ # AI Shopping Concierge code +โ”œโ”€โ”€ scripts/ +โ”‚ โ”œโ”€โ”€ repository-setup/ # Setup and migration scripts +โ”‚ โ””โ”€โ”€ automation/ # Sync, deploy, maintenance +โ”œโ”€โ”€ samples/python/ # Extended AP2 Python samples +โ”œโ”€โ”€ deployment/ # Docker, K8s, cloud configs +โ”œโ”€โ”€ docs/ai-shopping-concierge/ # Documentation +โ””โ”€โ”€ .github/workflows/ # CI/CD pipelines +``` + +## ๐Ÿ”ง Complete Setup Workflow + +### Step 1: Repository Setup + +#### Option A: Linux/Mac +```bash +# Fork AP2 repo and set up your product repo +./scripts/repository-setup/1-fork-and-setup.sh + +# Sync and verify setup +./scripts/repository-setup/2-sync-and-verify.sh + +# Migrate your code to product layer +./scripts/repository-setup/3-migrate-code.sh +``` + +#### Option B: Windows +```powershell +# Fork AP2 repo and set up your product repo +.\scripts\repository-setup\1-fork-and-setup.bat +``` + +### Step 2: Development Setup + +1. **Install Dependencies** +```bash +cd samples/python +pip install -r requirements.txt +pip install -e . +``` + +2. **Environment Configuration** +```bash +cp .env.example .env +# Edit .env with your API keys and configuration +``` + +3. **Database Setup** +```bash +# Start development environment +docker-compose -f docker-compose.dev.yml up -d + +# Run migrations +python -m alembic upgrade head +``` + +### Step 3: Run the AI Shopping Concierge + +#### Development Mode +```bash +# Start the FastAPI server +cd samples/python +python -m src.common.server + +# Or use the development docker setup +docker-compose -f docker-compose.dev.yml up +``` + +#### Production Mode +```bash +# Build and run production containers +docker-compose -f docker-compose.production.yml up -d +``` + +## โ˜๏ธ Cloud Deployment + +### AWS ECS +```bash +./scripts/automation/cloud-deploy.sh --provider aws --region us-west-2 deploy +``` + +### Google Cloud Run +```bash +./scripts/automation/cloud-deploy.sh --provider gcp --region us-central1 deploy +``` + +### Azure Container Instances +```bash +./scripts/automation/cloud-deploy.sh --provider azure --region eastus deploy +``` + +### Kubernetes +```bash +# Apply Kubernetes manifests +kubectl apply -f deployment/kubernetes/production.yaml + +# Check deployment status +kubectl get pods -n ai-shopping-concierge +``` + +## ๐Ÿ”„ Maintenance and Updates + +### Sync with Upstream AP2 +```bash +# Sync latest changes from Google's AP2 repo +./scripts/automation/sync-upstream.sh + +# Deploy updated version +./scripts/automation/deploy.sh production +``` + +### Monitor and Maintain +```bash +# Run maintenance tasks +./scripts/automation/maintenance.sh + +# Check application health +curl http://localhost:8000/health +``` + +## ๐Ÿงช Testing + +### Unit Tests +```bash +cd samples/python +python -m pytest tests/ -v +``` + +### Integration Tests +```bash +# Test WhatsApp integration +python -m pytest tests/integration/test_whatsapp.py + +# Test AI curation +python -m pytest tests/integration/test_curation.py + +# Test checkout flow +python -m pytest tests/integration/test_checkout.py +``` + +### Load Testing +```bash +# Install k6 +brew install k6 # macOS +# or download from https://k6.io/ + +# Run load tests +k6 run tests/load/basic-load-test.js +``` + +## ๐Ÿ“Š Monitoring and Observability + +### Prometheus Metrics +- Application metrics: `http://localhost:9090` +- Custom metrics for shopping sessions, conversions, etc. + +### Grafana Dashboards +- Performance dashboard: `http://localhost:3000` +- Business metrics dashboard with shopping analytics + +### Log Aggregation +- Centralized logging with Fluentd +- Structured logs for easy analysis + +## ๐Ÿ” Security + +### Environment Variables +```bash +# Required API keys and secrets +GOOGLE_AI_API_KEY=your_google_ai_key +WHATSAPP_ACCESS_TOKEN=your_whatsapp_token +AP2_MERCHANT_ID=your_merchant_id +AP2_API_KEY=your_ap2_key +SECRET_KEY=your_secret_key +``` + +### SSL/TLS Configuration +- Automatic HTTPS with Let's Encrypt +- Certificate management in cloud deployments + +## ๐Ÿ—๏ธ Architecture Overview + +### Core Components +1. **WhatsApp Integration** (`channels/whatsapp_integration.py`) + - Business API integration + - Message handling and routing + +2. **AI Curation Engine** (`ai_curation/smart_curation_engine.py`) + - Product recommendation + - Personalized suggestions + +3. **Negotiation Engine** (`ai_curation/negotiation_engine.py`) + - Dynamic pricing + - Bundle optimization + +4. **Checkout Optimizer** (`optimization/checkout_optimizer.py`) + - Payment processing + - Currency conversion + - Settlement handling + +5. **Analytics Engine** (`analytics/performance_analytics.py`) + - Real-time metrics + - Business intelligence + +### Data Flow +``` +Customer Message โ†’ WhatsApp โ†’ AI Agent โ†’ Product Curation โ†’ Negotiation โ†’ AP2 Checkout โ†’ Payment + โ†“ + Analytics & Monitoring +``` + +## ๐Ÿ”ง Configuration + +### Application Settings +```python +# src/common/config.py +class Settings: + environment: str = "production" + debug: bool = False + database_url: str + redis_url: str + google_ai_api_key: str + whatsapp_access_token: str + ap2_merchant_id: str + # ... more settings +``` + +### Feature Flags +```python +# Enable/disable features +ENABLE_AI_NEGOTIATION = True +ENABLE_CURRENCY_CONVERSION = True +ENABLE_ANALYTICS = True +MAX_PRODUCTS_PER_RECOMMENDATION = 10 +``` + +## ๐Ÿ“ˆ Scaling + +### Horizontal Scaling +- Auto-scaling based on CPU/memory usage +- Load balancing across multiple instances + +### Database Scaling +- Read replicas for analytics queries +- Connection pooling for high concurrency + +### Caching Strategy +- Redis for session data and recommendations +- CDN for static assets + +## ๐Ÿ› Troubleshooting + +### Common Issues + +1. **WhatsApp Webhook Verification Failed** + ```bash + # Check webhook URL and verify token + curl -X GET "https://your-domain.com/webhooks/whatsapp?hub.verify_token=your_verify_token&hub.challenge=challenge" + ``` + +2. **Database Connection Issues** + ```bash + # Check database connectivity + docker-compose logs db + ``` + +3. **AI API Rate Limits** + ```bash + # Monitor API usage and implement rate limiting + # Check logs for rate limit errors + ``` + +### Debug Mode +```bash +# Enable debug logging +export DEBUG=true +export LOG_LEVEL=DEBUG +``` + +## ๐Ÿ“š API Reference + +### REST Endpoints +- `GET /health` - Health check +- `POST /webhooks/whatsapp` - WhatsApp webhook +- `GET /api/products/recommendations` - Product recommendations +- `POST /api/checkout/initiate` - Start checkout process +- `GET /api/analytics/dashboard` - Analytics data + +### WebSocket Endpoints +- `/ws/chat/{session_id}` - Real-time chat +- `/ws/notifications` - Real-time notifications + +## ๐Ÿค Contributing + +### Development Workflow +1. Fork the repository +2. Create a feature branch +3. Make changes and add tests +4. Run tests and linting +5. Submit a pull request + +### Code Style +```bash +# Format code +black samples/python/src/ +isort samples/python/src/ + +# Lint code +flake8 samples/python/src/ +mypy samples/python/src/ +``` + +## ๐Ÿ“ License + +This project extends the AP2 protocol under the Apache 2.0 License. See [LICENSE](LICENSE) for details. + +## ๐Ÿ†˜ Support + +- **Documentation**: [docs/ai-shopping-concierge/](docs/ai-shopping-concierge/) +- **Issues**: GitHub Issues +- **Discord**: [AP2 Community](https://discord.gg/ap2-community) + +## ๐ŸŽฏ Roadmap + +### Current Features โœ… +- WhatsApp Business API integration +- AI-powered product curation +- Dynamic pricing and negotiation +- AP2 secure checkout +- Real-time analytics +- Multi-cloud deployment + +### Upcoming Features ๐Ÿšง +- Voice integration (Twilio/AWS Connect) +- Multi-language support +- Advanced ML recommendations +- Inventory management integration +- B2B marketplace features + +--- + +**Ready to revolutionize shopping with AI? Start building your intelligent shopping concierge today!** ๐Ÿ›๏ธโœจ \ No newline at end of file diff --git a/DEPLOYMENT_GUIDE.md b/DEPLOYMENT_GUIDE.md new file mode 100644 index 00000000..acf13d84 --- /dev/null +++ b/DEPLOYMENT_GUIDE.md @@ -0,0 +1,778 @@ +# AI Shopping Agent Deployment Guide + +## Overview + +This guide provides complete setup and deployment instructions for the enhanced AI Shopping Agent built on the AP2 (Agent Payments Protocol) platform. The solution includes multi-channel support (WhatsApp, Web Chat), AI-powered product curation, intelligent negotiation, and comprehensive analytics. + +## Features + +- **Multi-Channel Support**: WhatsApp Business API, Web Chat Widget, SMS, Telegram +- **AI Product Curation**: Personalized recommendations, smart bundles, dynamic pricing +- **Intelligent Negotiation**: Real-time price negotiation, discount optimization +- **Checkout Optimization**: Cart abandonment recovery, one-click purchasing +- **Comprehensive Analytics**: Conversion tracking, AOV metrics, customer insights +- **AP2 Integration**: Secure payments through Agent Payments Protocol + +## Architecture + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ WhatsApp โ”‚ โ”‚ Web Chat โ”‚ โ”‚ Other โ”‚ +โ”‚ Business API โ”‚ โ”‚ Widget โ”‚ โ”‚ Channels โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ โ”‚ โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Unified Chat Manager โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ โ”‚ โ”‚ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ AI Curation โ”‚ โ”‚ Negotiation โ”‚ โ”‚ Checkout โ”‚ +โ”‚ Engine โ”‚ โ”‚ Engine โ”‚ โ”‚ Optimizer โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ โ”‚ โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ AP2 Shopping Agent โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Analytics Engine โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Prerequisites + +### System Requirements + +- Python 3.10 or higher +- Node.js 18+ (for web components) +- PostgreSQL 12+ or SQLite (for production use PostgreSQL) +- Redis 6+ (for session management and caching) +- Minimum 4GB RAM, 2 CPU cores +- 20GB disk space + +### Required Services + +1. **Google AI Studio API Key** or **Vertex AI** access +2. **WhatsApp Business API** account +3. **Facebook Developer** account (for WhatsApp) +4. **Domain** with SSL certificate +5. **Email service** (for notifications) + +### Development Tools + +```bash +# Install uv (Python package manager) +curl -LsSf https://astral.sh/uv/install.sh | sh + +# Install Node.js dependencies +npm install -g typescript + +# Install Docker (optional but recommended) +# Follow Docker installation guide for your OS +``` + +## Installation + +### 1. Clone and Setup Repository + +```bash +# Clone the AP2 repository +git clone https://github.com/google-agentic-commerce/AP2.git +cd AP2 + +# Create virtual environment +uv venv +source .venv/bin/activate # On Windows: .venv\Scripts\activate + +# Install dependencies +uv pip install -r requirements.txt +uv pip install -e . +``` + +### 2. Environment Configuration + +Create a `.env` file in the root directory: + +```bash +# Google AI Configuration +GOOGLE_API_KEY=your_google_ai_api_key +# OR for Vertex AI: +# GOOGLE_GENAI_USE_VERTEXAI=true +# GOOGLE_CLOUD_PROJECT=your-project-id +# GOOGLE_CLOUD_LOCATION=global + +# WhatsApp Business API +WHATSAPP_BUSINESS_TOKEN=your_whatsapp_token +WHATSAPP_PHONE_NUMBER_ID=your_phone_number_id +WHATSAPP_WEBHOOK_VERIFY_TOKEN=your_webhook_verify_token + +# Database Configuration +DATABASE_URL=postgresql://user:password@localhost:5432/ai_shopping_agent +# For development, you can use SQLite: +# DATABASE_URL=sqlite:///./ai_shopping_agent.db + +# Redis Configuration +REDIS_URL=redis://localhost:6379/0 + +# Application Settings +SECRET_KEY=your_secret_key_here +DEBUG=false +ENVIRONMENT=production + +# Webhook URLs +BASE_URL=https://your-domain.com +WEBHOOK_SECRET=your_webhook_secret + +# Email Configuration +SMTP_HOST=smtp.gmail.com +SMTP_PORT=587 +SMTP_USER=your_email@gmail.com +SMTP_PASSWORD=your_app_password + +# Analytics Configuration +ANALYTICS_ENABLED=true +EXPORT_ANALYTICS_DAILY=true +``` + +### 3. Database Setup + +```bash +# For PostgreSQL +createdb ai_shopping_agent + +# Run migrations (if using Django/SQLAlchemy) +python manage.py migrate + +# Or initialize tables manually +python scripts/init_database.py +``` + +### 4. WhatsApp Business API Setup + +#### 4.1 Create Facebook App + +1. Go to [Facebook Developers](https://developers.facebook.com/) +2. Create new app โ†’ Business โ†’ Create app +3. Add WhatsApp product to your app + +#### 4.2 Configure WhatsApp Business API + +1. Get temporary access token from WhatsApp Business API setup +2. Configure webhook URL: `https://your-domain.com/webhook/whatsapp` +3. Set webhook verify token in your `.env` file +4. Subscribe to webhook events: `messages`, `message_status` + +#### 4.3 Test WhatsApp Integration + +```bash +# Test webhook verification +curl -X GET "https://your-domain.com/webhook/whatsapp?hub.mode=subscribe&hub.challenge=CHALLENGE_ACCEPTED&hub.verify_token=your_verify_token" + +# Should return: CHALLENGE_ACCEPTED +``` + +## Deployment Options + +### Option 1: Docker Deployment (Recommended) + +#### 1.1 Create Dockerfile + +```dockerfile +# Dockerfile +FROM python:3.11-slim + +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + build-essential \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Install Python dependencies +COPY requirements.txt . +RUN pip install -r requirements.txt + +# Copy application code +COPY . . + +# Install the package +RUN pip install -e . + +# Expose port +EXPOSE 8000 + +# Start application +CMD ["uvicorn", "samples.python.src.channels.unified_chat_manager:app", "--host", "0.0.0.0", "--port", "8000"] +``` + +#### 1.2 Create docker-compose.yml + +```yaml +version: '3.8' + +services: + app: + build: . + ports: + - "8000:8000" + environment: + - DATABASE_URL=postgresql://postgres:password@db:5432/ai_shopping_agent + - REDIS_URL=redis://redis:6379/0 + env_file: + - .env + depends_on: + - db + - redis + volumes: + - ./logs:/app/logs + + db: + image: postgres:13 + environment: + POSTGRES_DB: ai_shopping_agent + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + volumes: + - postgres_data:/var/lib/postgresql/data + ports: + - "5432:5432" + + redis: + image: redis:6-alpine + ports: + - "6379:6379" + + nginx: + image: nginx:alpine + ports: + - "80:80" + - "443:443" + volumes: + - ./nginx.conf:/etc/nginx/nginx.conf + - ./ssl:/etc/nginx/ssl + depends_on: + - app + +volumes: + postgres_data: +``` + +#### 1.3 Deploy with Docker + +```bash +# Build and start services +docker-compose up -d + +# Check logs +docker-compose logs -f app + +# Scale if needed +docker-compose up -d --scale app=3 +``` + +### Option 2: Cloud Platform Deployment + +#### 2.1 Google Cloud Platform + +```bash +# Install Google Cloud CLI +curl https://sdk.cloud.google.com | bash + +# Initialize and authenticate +gcloud init +gcloud auth login + +# Create project +gcloud projects create ai-shopping-agent-prod +gcloud config set project ai-shopping-agent-prod + +# Deploy to Cloud Run +gcloud run deploy ai-shopping-agent \ + --source . \ + --platform managed \ + --region us-central1 \ + --allow-unauthenticated \ + --memory 2Gi \ + --cpu 2 \ + --min-instances 1 \ + --max-instances 10 +``` + +#### 2.2 AWS Deployment + +```bash +# Install AWS CLI and EB CLI +pip install awscli awsebcli + +# Initialize Elastic Beanstalk +eb init +eb create ai-shopping-agent-prod + +# Deploy +eb deploy +``` + +#### 2.3 Azure Deployment + +```bash +# Install Azure CLI +curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + +# Create resource group +az group create --name ai-shopping-agent --location eastus + +# Create web app +az webapp create --resource-group ai-shopping-agent --plan ai-shopping-agent-plan --name ai-shopping-agent --runtime "PYTHON|3.11" + +# Deploy +az webapp deployment source config --name ai-shopping-agent --resource-group ai-shopping-agent --repo-url https://github.com/your-repo/ai-shopping-agent --branch main +``` + +## Configuration + +### Application Settings + +Create `config.py`: + +```python +import os +from typing import Optional + +class Settings: + # API Configuration + GOOGLE_API_KEY: Optional[str] = os.getenv("GOOGLE_API_KEY") + WHATSAPP_TOKEN: str = os.getenv("WHATSAPP_BUSINESS_TOKEN", "") + + # Database + DATABASE_URL: str = os.getenv("DATABASE_URL", "sqlite:///./app.db") + + # Redis + REDIS_URL: str = os.getenv("REDIS_URL", "redis://localhost:6379/0") + + # Application + SECRET_KEY: str = os.getenv("SECRET_KEY", "dev-secret-key") + DEBUG: bool = os.getenv("DEBUG", "false").lower() == "true" + + # AI Curation Settings + MAX_RECOMMENDATIONS: int = 10 + DEFAULT_DISCOUNT_LIMIT: float = 0.25 + NEGOTIATION_ENABLED: bool = True + + # Checkout Optimization + CART_ABANDONMENT_TIMEOUT: int = 1800 # 30 minutes + RECOVERY_ATTEMPTS_LIMIT: int = 3 + + # Analytics + ANALYTICS_ENABLED: bool = True + ANALYTICS_RETENTION_DAYS: int = 365 + +settings = Settings() +``` + +### Nginx Configuration + +Create `nginx.conf`: + +```nginx +events { + worker_connections 1024; +} + +http { + upstream app { + server app:8000; + } + + server { + listen 80; + server_name your-domain.com; + return 301 https://$server_name$request_uri; + } + + server { + listen 443 ssl http2; + server_name your-domain.com; + + ssl_certificate /etc/nginx/ssl/cert.pem; + ssl_certificate_key /etc/nginx/ssl/key.pem; + + location / { + proxy_pass http://app; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /ws/ { + proxy_pass http://app; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + } + } +} +``` + +## Running the Application + +### Development Mode + +```bash +# Start individual components +cd samples/python + +# Start unified chat manager +python -m uvicorn src.channels.unified_chat_manager:app --reload --port 8000 + +# Start analytics dashboard (optional) +python -m uvicorn src.analytics.dashboard:app --reload --port 8001 + +# Start background workers +python -m celery worker -A src.tasks --loglevel=info +``` + +### Production Mode + +```bash +# Using gunicorn +gunicorn -w 4 -k uvicorn.workers.UvicornWorker samples.python.src.channels.unified_chat_manager:app --bind 0.0.0.0:8000 + +# Or using uvicorn +uvicorn samples.python.src.channels.unified_chat_manager:app --host 0.0.0.0 --port 8000 --workers 4 +``` + +## Testing + +### Unit Tests + +```bash +# Run all tests +python -m pytest tests/ + +# Run specific test files +python -m pytest tests/test_curation_engine.py +python -m pytest tests/test_negotiation_engine.py +python -m pytest tests/test_chat_manager.py + +# Run with coverage +python -m pytest --cov=src tests/ +``` + +### Integration Tests + +```bash +# Test WhatsApp integration +python tests/integration/test_whatsapp.py + +# Test end-to-end shopping flow +python tests/integration/test_shopping_flow.py + +# Load testing +python tests/load/test_concurrent_users.py +``` + +### Manual Testing + +```bash +# Test web chat locally +curl -X POST http://localhost:8000/webhook/whatsapp \ + -H "Content-Type: application/json" \ + -d '{"entry":[{"changes":[{"field":"messages","value":{"messages":[{"from":"1234567890","text":{"body":"Hi"},"timestamp":"1640995200"}]}}]}]}' + +# Test WhatsApp webhook +ngrok http 8000 +# Update webhook URL in Facebook Developer Console to ngrok URL +``` + +## Monitoring and Maintenance + +### Health Checks + +```python +# Add to your application +@app.get("/health") +async def health_check(): + return { + "status": "healthy", + "timestamp": datetime.now().isoformat(), + "services": { + "database": await check_database_health(), + "redis": await check_redis_health(), + "whatsapp_api": await check_whatsapp_health() + } + } +``` + +### Logging + +```python +import logging +import sys + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + handlers=[ + logging.FileHandler('logs/app.log'), + logging.StreamHandler(sys.stdout) + ] +) +``` + +### Metrics and Alerts + +```bash +# Using Prometheus and Grafana +docker run -d -p 9090:9090 prom/prometheus +docker run -d -p 3000:3000 grafana/grafana + +# Add metrics endpoints to your app +from prometheus_client import Counter, Histogram, generate_latest + +message_counter = Counter('messages_total', 'Total messages processed') +response_time = Histogram('response_time_seconds', 'Response time') + +@app.get("/metrics") +async def metrics(): + return Response(generate_latest(), media_type="text/plain") +``` + +## Security + +### SSL/TLS Configuration + +```bash +# Get Let's Encrypt certificate +sudo apt install certbot +sudo certbot certonly --standalone -d your-domain.com + +# Or use CloudFlare for SSL termination +``` + +### API Security + +```python +from fastapi import HTTPException, Depends, Header + +async def verify_webhook_signature( + x_hub_signature_256: str = Header(None), + request: Request +): + if not x_hub_signature_256: + raise HTTPException(status_code=401, detail="Missing signature") + + body = await request.body() + expected_signature = hmac.new( + WEBHOOK_SECRET.encode(), + body, + hashlib.sha256 + ).hexdigest() + + if not hmac.compare_digest(f"sha256={expected_signature}", x_hub_signature_256): + raise HTTPException(status_code=401, detail="Invalid signature") +``` + +### Environment Security + +```bash +# Secure environment variables +chmod 600 .env + +# Use secret management +aws secretsmanager create-secret --name ai-shopping-agent-config +gcloud secrets create ai-shopping-agent-config +az keyvault secret set --vault-name ai-shopping-agent --name config +``` + +## Scaling and Performance + +### Horizontal Scaling + +```yaml +# Kubernetes deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ai-shopping-agent +spec: + replicas: 5 + selector: + matchLabels: + app: ai-shopping-agent + template: + metadata: + labels: + app: ai-shopping-agent + spec: + containers: + - name: app + image: ai-shopping-agent:latest + ports: + - containerPort: 8000 + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: db-config + key: url +``` + +### Performance Optimization + +```python +# Add caching +from functools import lru_cache +import redis + +redis_client = redis.Redis.from_url(settings.REDIS_URL) + +@lru_cache(maxsize=1000) +def get_cached_recommendations(customer_id: str, query: str): + cache_key = f"recommendations:{customer_id}:{hash(query)}" + cached = redis_client.get(cache_key) + if cached: + return json.loads(cached) + + # Generate recommendations + recommendations = generate_recommendations(customer_id, query) + redis_client.setex(cache_key, 3600, json.dumps(recommendations)) + return recommendations +``` + +### Database Optimization + +```sql +-- Add database indexes +CREATE INDEX idx_customer_events ON analytics_events(customer_id, timestamp); +CREATE INDEX idx_session_events ON analytics_events(session_id); +CREATE INDEX idx_event_type ON analytics_events(event_type); + +-- Partition large tables +CREATE TABLE analytics_events_2024_01 PARTITION OF analytics_events +FOR VALUES FROM ('2024-01-01') TO ('2024-02-01'); +``` + +## Troubleshooting + +### Common Issues + +1. **WhatsApp webhook not receiving messages** + ```bash + # Check webhook verification + curl -X GET "https://your-domain.com/webhook/whatsapp?hub.mode=subscribe&hub.challenge=test&hub.verify_token=your_token" + + # Check webhook logs + docker-compose logs -f app | grep webhook + ``` + +2. **High response times** + ```python + # Add performance monitoring + import time + + @app.middleware("http") + async def add_process_time_header(request: Request, call_next): + start_time = time.time() + response = await call_next(request) + process_time = time.time() - start_time + response.headers["X-Process-Time"] = str(process_time) + return response + ``` + +3. **Database connection issues** + ```bash + # Check database connectivity + python -c "import psycopg2; conn = psycopg2.connect('your_db_url'); print('Connected successfully')" + + # Check connection pool + docker-compose logs db + ``` + +### Debugging Tools + +```bash +# Debug mode +export DEBUG=true +python -m uvicorn src.channels.unified_chat_manager:app --reload --log-level debug + +# Profile performance +pip install py-spy +py-spy top --pid $(pydoc python) + +# Memory profiling +pip install memory-profiler +python -m memory_profiler your_script.py +``` + +## API Documentation + +The application provides comprehensive API documentation at: + +- Swagger UI: `https://your-domain.com/docs` +- ReDoc: `https://your-domain.com/redoc` +- OpenAPI JSON: `https://your-domain.com/openapi.json` + +## Support and Maintenance + +### Regular Maintenance Tasks + +```bash +# Weekly tasks +python scripts/cleanup_old_sessions.py +python scripts/archive_old_analytics.py +python scripts/backup_database.py + +# Monthly tasks +python scripts/optimize_database.py +python scripts/update_ml_models.py +python scripts/generate_business_report.py +``` + +### Backup Strategy + +```bash +# Database backup +pg_dump ai_shopping_agent > backup_$(date +%Y%m%d).sql + +# Application backup +tar -czf app_backup_$(date +%Y%m%d).tar.gz /path/to/app + +# Automated backup with cron +0 2 * * * /path/to/backup_script.sh +``` + +### Update Procedure + +```bash +# Update application +git pull origin main +uv pip install -r requirements.txt +python manage.py migrate +docker-compose restart app + +# Zero-downtime deployment +docker-compose up -d --scale app=2 +# Wait for health checks +docker-compose up -d --scale app=1 +``` + +## Getting Help + +- **Documentation**: [AP2 Documentation](https://google-agentic-commerce.github.io/AP2/) +- **Issues**: [GitHub Issues](https://github.com/google-agentic-commerce/AP2/issues) +- **Community**: [Discord/Slack Community](#) +- **Email Support**: support@your-domain.com + +## License + +This project is licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for details. \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..38d74e0f --- /dev/null +++ b/Dockerfile @@ -0,0 +1,121 @@ +# Multi-stage Dockerfile for AI Shopping Concierge +# Optimized for production deployment + +# Build stage +FROM python:3.11-slim as builder + +# Set build arguments +ARG BUILD_DATE +ARG VCS_REF +ARG VERSION + +# Add metadata labels +LABEL org.opencontainers.image.created=$BUILD_DATE \ + org.opencontainers.image.url="https://github.com/your-username/ai-shopping-concierge-ap2" \ + org.opencontainers.image.source="https://github.com/your-username/ai-shopping-concierge-ap2" \ + org.opencontainers.image.version=$VERSION \ + org.opencontainers.image.revision=$VCS_REF \ + org.opencontainers.image.vendor="AI Shopping Concierge" \ + org.opencontainers.image.title="AI Shopping Concierge" \ + org.opencontainers.image.description="Intelligent shopping assistant built on AP2 protocol" + +# Install system dependencies for building +RUN apt-get update && apt-get install -y \ + build-essential \ + curl \ + git \ + && rm -rf /var/lib/apt/lists/* + +# Set up Python environment +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 \ + PIP_NO_CACHE_DIR=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 + +# Create and use non-root user +RUN groupadd --gid 1000 appuser && \ + useradd --uid 1000 --gid appuser --shell /bin/bash --create-home appuser + +# Set work directory +WORKDIR /app + +# Copy requirements first for better caching +COPY requirements.txt . + +# Install Python dependencies +RUN pip install --upgrade pip && \ + pip install -r requirements.txt + +# Production stage +FROM python:3.11-slim as production + +# Install runtime system dependencies +RUN apt-get update && apt-get install -y \ + curl \ + && rm -rf /var/lib/apt/lists/* \ + && apt-get clean + +# Create non-root user +RUN groupadd --gid 1000 appuser && \ + useradd --uid 1000 --gid appuser --shell /bin/bash --create-home appuser + +# Set environment variables +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 \ + PATH="/home/appuser/.local/bin:$PATH" \ + PYTHONPATH="/app" + +# Set work directory +WORKDIR /app + +# Copy Python packages from builder stage +COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages +COPY --from=builder /usr/local/bin /usr/local/bin + +# Copy application code +COPY --chown=appuser:appuser . . + +# Initialize AP2 submodule if needed +RUN if [ -d ".git" ]; then \ + git submodule update --init --recursive; \ + fi + +# Create necessary directories +RUN mkdir -p logs config && \ + chown -R appuser:appuser /app + +# Switch to non-root user +USER appuser + +# Create health check script +RUN echo '#!/bin/bash\ncurl -f http://localhost:8000/health || exit 1' > /app/healthcheck.sh && \ + chmod +x /app/healthcheck.sh + +# Expose port +EXPOSE 8000 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \ + CMD /app/healthcheck.sh + +# Default command +CMD ["python", "-m", "ai_shopping_agent"] + +# Development stage (optional) +FROM production as development + +USER root + +# Install development dependencies +RUN pip install pytest pytest-asyncio pytest-cov black isort mypy + +# Install debugging tools +RUN apt-get update && apt-get install -y \ + vim \ + htop \ + && rm -rf /var/lib/apt/lists/* + +USER appuser + +# Override command for development +CMD ["python", "-m", "ai_shopping_agent", "--reload"] \ No newline at end of file diff --git a/FORK_SYNC_GUIDE.md b/FORK_SYNC_GUIDE.md new file mode 100644 index 00000000..7e813b30 --- /dev/null +++ b/FORK_SYNC_GUIDE.md @@ -0,0 +1,217 @@ +# Fork Sync Configuration and Setup Guide +# For /AP2-shopping-concierge + +## ๐Ÿ”„ Fork Sync Setup Complete! + +Your fork is now properly configured to stay in sync with the upstream AP2 repository. Here's what has been set up: + +### Git Remote Configuration +``` +origin โ†’ https://github.com/AnkitaParakh/AP2-shopping-concierge.git (your fork) +upstream โ†’ https://github.com/google-agentic-commerce/AP2.git (Google's repo) +``` + +### Branch Structure +- `main` - Synced with upstream/main (AP2 core protocol) +- `ai-shopping-concierge-dev` - Your AI Shopping Concierge features + +## ๐Ÿš€ How to Sync with Upstream + +### Option 1: PowerShell (Windows) +```powershell +# Sync all branches +.\scripts\automation\sync-ankita-fork.ps1 + +# Sync specific branch only +.\scripts\automation\sync-ankita-fork.ps1 -Branch main + +# Force sync (even with uncommitted changes) +.\scripts\automation\sync-ankita-fork.ps1 -Force +``` + +### Option 2: Bash (Linux/Mac/WSL) +```bash +# Make script executable (first time only) +chmod +x scripts/automation/sync-ankita-fork.sh + +# Sync all branches +./scripts/automation/sync-ankita-fork.sh +``` + +### Option 3: Manual Sync +```bash +# Fetch latest upstream changes +git fetch upstream + +# Switch to main branch and sync +git checkout main +git merge upstream/main +git push origin main + +# Switch back to your development branch +git checkout ai-shopping-concierge-dev +``` + +## โฐ Automated Sync (Recommended) + +### Windows Task Scheduler +1. Open Task Scheduler (taskschd.msc) +2. Create Basic Task: + - Name: "Sync AP2 Fork" + - Trigger: Daily at 9:00 AM + - Action: Start a program + - Program: `powershell.exe` + - Arguments: `-ExecutionPolicy Bypass -File "C:\AP2\scripts\automation\sync-ankita-fork.ps1"` + - Start in: `C:\AP2` + +### GitHub Actions (Automated) +Create `.github/workflows/sync-upstream.yml` in your fork: + +```yaml +name: Sync Fork with Upstream + +on: + schedule: + - cron: '0 9 * * *' # Daily at 9 AM UTC + workflow_dispatch: # Manual trigger + +jobs: + sync: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + fetch-depth: 0 + + - name: Sync upstream + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git remote add upstream https://github.com/google-agentic-commerce/AP2.git + git fetch upstream + git checkout main + git merge upstream/main --no-edit + git push origin main +``` + +## ๐Ÿ” Monitor Sync Status + +### Check if your fork is behind upstream: +```bash +# Check how many commits behind +git fetch upstream +git rev-list --count HEAD..upstream/main + +# View what changes are available +git log --oneline HEAD..upstream/main +``` + +### Check sync history: +```bash +# View recent commits +git log --oneline -10 + +# View merge commits (sync points) +git log --merges --oneline -5 +``` + +## ๐Ÿ› ๏ธ Development Workflow + +### Working on AI Shopping Concierge features: +```bash +# 1. Start from your development branch +git checkout ai-shopping-concierge-dev + +# 2. Create feature branch +git checkout -b feature/new-payment-method + +# 3. Make changes and commit +git add . +git commit -m "feat: Add new payment method support" + +# 4. Push to your fork +git push origin feature/new-payment-method + +# 5. Create PR in your fork: feature/new-payment-method โ†’ ai-shopping-concierge-dev +``` + +### When contributing back to AP2 protocol: +```bash +# 1. Start from synced main branch +git checkout main +git pull upstream main + +# 2. Create protocol improvement branch +git checkout -b protocol/improve-security + +# 3. Make changes and commit +git add . +git commit -m "feat: Enhance AP2 security validation" + +# 4. Push to your fork +git push origin protocol/improve-security + +# 5. Create PR to upstream: your-fork/protocol/improve-security โ†’ google-agentic-commerce/AP2:main +``` + +## ๐Ÿ“‹ Sync Checklist + +### Daily (Automated): +- โœ… Fetch upstream changes +- โœ… Merge to main branch +- โœ… Push to your fork +- โœ… Check for conflicts + +### Weekly (Manual Review): +- [ ] Review upstream changelog +- [ ] Test compatibility with your features +- [ ] Update dependencies if needed +- [ ] Merge main into your development branches + +### Monthly (Maintenance): +- [ ] Clean up old feature branches +- [ ] Review and update documentation +- [ ] Performance testing +- [ ] Security audit + +## ๐Ÿšจ Troubleshooting + +### Merge Conflicts: +```bash +# If sync fails due to conflicts: +git status # See conflicted files +# Edit files to resolve conflicts +git add . # Stage resolved files +git commit # Complete the merge +git push origin main # Push resolved version +``` + +### Reset to Upstream (Nuclear Option): +```bash +# WARNING: This will lose your changes on main branch +git fetch upstream +git checkout main +git reset --hard upstream/main +git push --force-with-lease origin main +``` + +### Fork is Far Behind: +```bash +# If your fork is many commits behind: +git fetch upstream +git rebase upstream/main # Replay your commits on top of upstream +git push --force-with-lease origin main +``` + +## ๐Ÿ“ž Support + +- **Sync Issues**: Check the troubleshooting section above +- **Upstream Changes**: Monitor [AP2 Releases](https://github.com/google-agentic-commerce/AP2/releases) +- **Feature Development**: Use the development workflow above + +--- + +**Your AI Shopping Concierge fork is now fully configured for automatic upstream synchronization! ๐ŸŽ‰** + +The sync will keep your protocol foundation up-to-date while preserving your product innovations in separate branches. \ No newline at end of file diff --git a/deployment/cloud-config.env.example b/deployment/cloud-config.env.example new file mode 100644 index 00000000..d93559b1 --- /dev/null +++ b/deployment/cloud-config.env.example @@ -0,0 +1,70 @@ +# Cloud Deployment Configuration +# Copy this file to cloud-config.env and update with your values + +# Cloud Provider Configuration +CLOUD_PROVIDER=aws # aws, gcp, or azure +REGION=us-west-2 # Deployment region +PROJECT_NAME=ai-shopping-concierge # Project name (no spaces or special chars) +ENVIRONMENT=production # dev, staging, or production +IMAGE_TAG=latest # Docker image tag +DOMAIN_NAME=api.your-domain.com # Optional: Custom domain + +# AWS Configuration +AWS_ACCOUNT_ID=123456789012 # Your AWS account ID +AWS_VPC_ID=vpc-12345678 # VPC ID for ECS deployment +AWS_SUBNET_IDS=subnet-12345,subnet-67890 # Comma-separated subnet IDs +AWS_SECURITY_GROUP_ID=sg-12345 # Security group ID + +# Google Cloud Configuration +GCP_PROJECT_ID=your-project-id # Google Cloud project ID +GCP_SERVICE_ACCOUNT=deploy@your-project.iam.gserviceaccount.com # Service account + +# Azure Configuration +AZURE_SUBSCRIPTION_ID=your-subscription-id # Azure subscription ID +AZURE_TENANT_ID=your-tenant-id # Azure tenant ID +AZURE_CLIENT_ID=your-client-id # Service principal client ID +AZURE_CLIENT_SECRET=your-client-secret # Service principal secret + +# Application Configuration +APP_CPU=1024 # CPU units (AWS) or cores (GCP/Azure) +APP_MEMORY=2048 # Memory in MB +APP_MIN_INSTANCES=2 # Minimum number of instances +APP_MAX_INSTANCES=10 # Maximum number of instances + +# Database Configuration (if using managed databases) +DB_INSTANCE_CLASS=db.t3.micro # AWS RDS instance class +DB_ALLOCATED_STORAGE=20 # Storage in GB +DB_ENGINE_VERSION=15.4 # PostgreSQL version + +# Redis Configuration (if using managed Redis) +REDIS_NODE_TYPE=cache.t3.micro # AWS ElastiCache node type +REDIS_NUM_CACHE_NODES=1 # Number of cache nodes + +# SSL/TLS Configuration +SSL_CERTIFICATE_ARN=arn:aws:acm:region:account:certificate/cert-id # AWS Certificate Manager ARN +SSL_CERTIFICATE_NAME=your-cert # GCP SSL certificate name + +# Monitoring and Logging +LOG_RETENTION_DAYS=30 # Log retention in days +ENABLE_MONITORING=true # Enable CloudWatch/Stackdriver monitoring +ENABLE_AUTOSCALING=true # Enable auto-scaling + +# Security Configuration +ENABLE_WAF=true # Enable Web Application Firewall +ALLOWED_CIDR_BLOCKS=0.0.0.0/0 # Comma-separated CIDR blocks for access +ENABLE_SECRETS_MANAGER=true # Use cloud secrets manager + +# Backup Configuration +ENABLE_AUTOMATED_BACKUPS=true # Enable automated database backups +BACKUP_RETENTION_PERIOD=7 # Backup retention in days +BACKUP_WINDOW=03:00-04:00 # Preferred backup window (UTC) + +# Cost Optimization +ENABLE_SPOT_INSTANCES=false # Use spot instances (AWS) +ENABLE_PREEMPTIBLE=false # Use preemptible instances (GCP) +ENABLE_LOW_PRIORITY=false # Use low priority instances (Azure) + +# Notification Configuration +SLACK_WEBHOOK_URL=https://hooks.slack.com/services/... # Slack notifications +EMAIL_NOTIFICATIONS=admin@your-domain.com # Email notifications +SNS_TOPIC_ARN=arn:aws:sns:region:account:topic-name # AWS SNS topic \ No newline at end of file diff --git a/deployment/kubernetes/production.yaml b/deployment/kubernetes/production.yaml new file mode 100644 index 00000000..57521b18 --- /dev/null +++ b/deployment/kubernetes/production.yaml @@ -0,0 +1,561 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ai-shopping-concierge + labels: + app: ai-shopping-concierge + version: production +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-config + namespace: ai-shopping-concierge +data: + ENVIRONMENT: "production" + DEBUG: "false" + DATABASE_URL: "postgresql+asyncpg://postgres:$(DB_PASSWORD)@postgres-service:5432/ai_shopping_concierge" + REDIS_URL: "redis://:$(REDIS_PASSWORD)@redis-service:6379/0" +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-secrets + namespace: ai-shopping-concierge +type: Opaque +data: + # Base64 encoded secrets - update these in your deployment pipeline + DB_PASSWORD: cG9zdGdyZXM= # postgres + REDIS_PASSWORD: cmVkaXM= # redis + GOOGLE_AI_API_KEY: "" + WHATSAPP_ACCESS_TOKEN: "" + WHATSAPP_VERIFY_TOKEN: "" + WHATSAPP_PHONE_NUMBER_ID: "" + AP2_MERCHANT_ID: "" + AP2_API_KEY: "" + STRIPE_API_KEY: "" + PAYPAL_CLIENT_ID: "" + PAYPAL_CLIENT_SECRET: "" + SECRET_KEY: "" + SENTRY_DSN: "" + GRAFANA_ADMIN_USER: YWRtaW4= # admin + GRAFANA_ADMIN_PASSWORD: YWRtaW4= # admin +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ai-shopping-concierge + namespace: ai-shopping-concierge + labels: + app: ai-shopping-concierge + tier: application +spec: + replicas: 3 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + selector: + matchLabels: + app: ai-shopping-concierge + tier: application + template: + metadata: + labels: + app: ai-shopping-concierge + tier: application + spec: + containers: + - name: ai-shopping-concierge + image: ghcr.io/your-username/ai-shopping-concierge:latest + ports: + - containerPort: 8000 + env: + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: app-secrets + key: DB_PASSWORD + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: app-secrets + key: REDIS_PASSWORD + - name: GOOGLE_AI_API_KEY + valueFrom: + secretKeyRef: + name: app-secrets + key: GOOGLE_AI_API_KEY + - name: WHATSAPP_ACCESS_TOKEN + valueFrom: + secretKeyRef: + name: app-secrets + key: WHATSAPP_ACCESS_TOKEN + - name: WHATSAPP_VERIFY_TOKEN + valueFrom: + secretKeyRef: + name: app-secrets + key: WHATSAPP_VERIFY_TOKEN + - name: WHATSAPP_PHONE_NUMBER_ID + valueFrom: + secretKeyRef: + name: app-secrets + key: WHATSAPP_PHONE_NUMBER_ID + - name: AP2_MERCHANT_ID + valueFrom: + secretKeyRef: + name: app-secrets + key: AP2_MERCHANT_ID + - name: AP2_API_KEY + valueFrom: + secretKeyRef: + name: app-secrets + key: AP2_API_KEY + - name: STRIPE_API_KEY + valueFrom: + secretKeyRef: + name: app-secrets + key: STRIPE_API_KEY + - name: PAYPAL_CLIENT_ID + valueFrom: + secretKeyRef: + name: app-secrets + key: PAYPAL_CLIENT_ID + - name: PAYPAL_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: app-secrets + key: PAYPAL_CLIENT_SECRET + - name: SECRET_KEY + valueFrom: + secretKeyRef: + name: app-secrets + key: SECRET_KEY + - name: SENTRY_DSN + valueFrom: + secretKeyRef: + name: app-secrets + key: SENTRY_DSN + envFrom: + - configMapRef: + name: app-config + resources: + requests: + memory: "512Mi" + cpu: "500m" + limits: + memory: "1Gi" + cpu: "1000m" + livenessProbe: + httpGet: + path: /health + port: 8000 + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /health + port: 8000 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + volumeMounts: + - name: logs + mountPath: /app/logs + - name: config + mountPath: /app/config + readOnly: true + volumes: + - name: logs + persistentVolumeClaim: + claimName: logs-pvc + - name: config + configMap: + name: app-config + imagePullSecrets: + - name: ghcr-secret +--- +apiVersion: v1 +kind: Service +metadata: + name: ai-shopping-concierge-service + namespace: ai-shopping-concierge + labels: + app: ai-shopping-concierge + tier: application +spec: + type: ClusterIP + ports: + - port: 8000 + targetPort: 8000 + protocol: TCP + selector: + app: ai-shopping-concierge + tier: application +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres + namespace: ai-shopping-concierge + labels: + app: postgres + tier: database +spec: + serviceName: postgres-service + replicas: 1 + selector: + matchLabels: + app: postgres + tier: database + template: + metadata: + labels: + app: postgres + tier: database + spec: + containers: + - name: postgres + image: postgres:15-alpine + ports: + - containerPort: 5432 + env: + - name: POSTGRES_DB + value: ai_shopping_concierge + - name: POSTGRES_USER + value: postgres + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: app-secrets + key: DB_PASSWORD + - name: POSTGRES_INITDB_ARGS + value: "--encoding=UTF-8" + resources: + requests: + memory: "1Gi" + cpu: "1000m" + limits: + memory: "2Gi" + cpu: "2000m" + livenessProbe: + exec: + command: + - pg_isready + - -U + - postgres + - -d + - ai_shopping_concierge + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - pg_isready + - -U + - postgres + - -d + - ai_shopping_concierge + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + volumeMounts: + - name: postgres-storage + mountPath: /var/lib/postgresql/data + - name: backup + mountPath: /backup + volumes: + - name: backup + persistentVolumeClaim: + claimName: backup-pvc + volumeClaimTemplates: + - metadata: + name: postgres-storage + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: "fast-ssd" + resources: + requests: + storage: 100Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres-service + namespace: ai-shopping-concierge + labels: + app: postgres + tier: database +spec: + type: ClusterIP + ports: + - port: 5432 + targetPort: 5432 + protocol: TCP + selector: + app: postgres + tier: database +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis + namespace: ai-shopping-concierge + labels: + app: redis + tier: cache +spec: + serviceName: redis-service + replicas: 1 + selector: + matchLabels: + app: redis + tier: cache + template: + metadata: + labels: + app: redis + tier: cache + spec: + containers: + - name: redis + image: redis:7-alpine + ports: + - containerPort: 6379 + command: + - redis-server + - --requirepass + - $(REDIS_PASSWORD) + - --appendonly + - "yes" + - --appendfsync + - everysec + - --maxmemory + - 1gb + - --maxmemory-policy + - allkeys-lru + env: + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: app-secrets + key: REDIS_PASSWORD + resources: + requests: + memory: "512Mi" + cpu: "250m" + limits: + memory: "1Gi" + cpu: "500m" + livenessProbe: + exec: + command: + - redis-cli + - --no-auth-warning + - -a + - $(REDIS_PASSWORD) + - ping + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 3 + readinessProbe: + exec: + command: + - redis-cli + - --no-auth-warning + - -a + - $(REDIS_PASSWORD) + - ping + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + volumeMounts: + - name: redis-storage + mountPath: /data + volumeClaimTemplates: + - metadata: + name: redis-storage + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: "fast-ssd" + resources: + requests: + storage: 10Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: redis-service + namespace: ai-shopping-concierge + labels: + app: redis + tier: cache +spec: + type: ClusterIP + ports: + - port: 6379 + targetPort: 6379 + protocol: TCP + selector: + app: redis + tier: cache +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ai-shopping-concierge-ingress + namespace: ai-shopping-concierge + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: letsencrypt-prod + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + nginx.ingress.kubernetes.io/rate-limit: "100" + nginx.ingress.kubernetes.io/rate-limit-window: "1m" + nginx.ingress.kubernetes.io/body-size: "10m" +spec: + tls: + - hosts: + - api.your-domain.com + secretName: ai-shopping-concierge-tls + rules: + - host: api.your-domain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: ai-shopping-concierge-service + port: + number: 8000 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: logs-pvc + namespace: ai-shopping-concierge +spec: + accessModes: + - ReadWriteMany + storageClassName: "nfs" + resources: + requests: + storage: 50Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: backup-pvc + namespace: ai-shopping-concierge +spec: + accessModes: + - ReadWriteMany + storageClassName: "nfs" + resources: + requests: + storage: 200Gi +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: ai-shopping-concierge-pdb + namespace: ai-shopping-concierge +spec: + minAvailable: 2 + selector: + matchLabels: + app: ai-shopping-concierge + tier: application +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: ai-shopping-concierge-hpa + namespace: ai-shopping-concierge +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: ai-shopping-concierge + minReplicas: 3 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 70 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 10 + periodSeconds: 60 + scaleUp: + stabilizationWindowSeconds: 60 + policies: + - type: Percent + value: 50 + periodSeconds: 60 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: ai-shopping-concierge-netpol + namespace: ai-shopping-concierge +spec: + podSelector: + matchLabels: + app: ai-shopping-concierge + policyTypes: + - Ingress + - Egress + ingress: + - from: + - namespaceSelector: + matchLabels: + name: ingress-nginx + - podSelector: + matchLabels: + app: postgres + - podSelector: + matchLabels: + app: redis + ports: + - protocol: TCP + port: 8000 + egress: + - to: + - podSelector: + matchLabels: + app: postgres + ports: + - protocol: TCP + port: 5432 + - to: + - podSelector: + matchLabels: + app: redis + ports: + - protocol: TCP + port: 6379 + - to: [] # Allow all outbound for external APIs + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 80 \ No newline at end of file diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml new file mode 100644 index 00000000..df4caceb --- /dev/null +++ b/docker-compose.dev.yml @@ -0,0 +1,166 @@ +# AI Shopping Concierge - Docker Compose for Development +# This file provides a complete development environment + +version: '3.8' + +services: + # Main application + ai-shopping-concierge: + build: + context: . + target: development + args: + BUILD_DATE: ${BUILD_DATE:-} + VCS_REF: ${VCS_REF:-} + VERSION: ${VERSION:-dev} + ports: + - "8000:8000" + - "5678:5678" # Debug port + environment: + - ENVIRONMENT=development + - DEBUG=true + - DATABASE_URL=postgresql+asyncpg://postgres:devpassword@db:5432/ai_shopping_concierge_dev + - REDIS_URL=redis://redis:6379/0 + - GOOGLE_AI_API_KEY=${GOOGLE_AI_API_KEY:-dev_key} + - WHATSAPP_ACCESS_TOKEN=${WHATSAPP_ACCESS_TOKEN:-dev_token} + - WHATSAPP_VERIFY_TOKEN=${WHATSAPP_VERIFY_TOKEN:-dev_verify} + - WHATSAPP_PHONE_NUMBER_ID=${WHATSAPP_PHONE_NUMBER_ID:-dev_phone} + - AP2_MERCHANT_ID=${AP2_MERCHANT_ID:-dev_merchant} + - SECRET_KEY=development_secret_key_change_in_production + volumes: + - .:/app + - ./logs:/app/logs + - ./config:/app/config + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + restart: unless-stopped + stdin_open: true + tty: true + + # PostgreSQL database + db: + image: postgres:15-alpine + environment: + POSTGRES_DB: ai_shopping_concierge_dev + POSTGRES_USER: postgres + POSTGRES_PASSWORD: devpassword + POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8" + volumes: + - postgres_dev_data:/var/lib/postgresql/data + - ./deployment/postgres/init.sql:/docker-entrypoint-initdb.d/init.sql:ro + ports: + - "5432:5432" + restart: unless-stopped + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d ai_shopping_concierge_dev"] + interval: 10s + timeout: 5s + retries: 5 + + # Redis cache + redis: + image: redis:7-alpine + command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru + volumes: + - redis_dev_data:/data + ports: + - "6379:6379" + restart: unless-stopped + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 3s + retries: 5 + + # Nginx for development (optional) + nginx: + image: nginx:alpine + ports: + - "80:80" + volumes: + - ./deployment/nginx/nginx.dev.conf:/etc/nginx/nginx.conf:ro + depends_on: + - ai-shopping-concierge + restart: unless-stopped + + # pgAdmin for database management + pgadmin: + image: dpage/pgadmin4:latest + environment: + PGADMIN_DEFAULT_EMAIL: admin@ai-shopping-concierge.com + PGADMIN_DEFAULT_PASSWORD: devpassword + PGADMIN_CONFIG_SERVER_MODE: 'False' + volumes: + - pgadmin_dev_data:/var/lib/pgadmin + ports: + - "5050:80" + depends_on: + - db + restart: unless-stopped + + # Redis Commander for Redis management + redis-commander: + image: ghcr.io/joeferner/redis-commander:latest + environment: + REDIS_HOSTS: local:redis:6379 + ports: + - "8081:8081" + depends_on: + - redis + restart: unless-stopped + + # Mailhog for email testing + mailhog: + image: mailhog/mailhog:latest + ports: + - "1025:1025" # SMTP + - "8025:8025" # Web UI + restart: unless-stopped + + # Prometheus for metrics (optional) + prometheus: + image: prom/prometheus:latest + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + - '--storage.tsdb.retention.time=200h' + - '--web.enable-lifecycle' + volumes: + - ./deployment/prometheus/prometheus.dev.yml:/etc/prometheus/prometheus.yml:ro + - prometheus_dev_data:/prometheus + ports: + - "9090:9090" + restart: unless-stopped + + # Grafana for metrics visualization (optional) + grafana: + image: grafana/grafana:latest + environment: + GF_SECURITY_ADMIN_USER: admin + GF_SECURITY_ADMIN_PASSWORD: devpassword + GF_USERS_ALLOW_SIGN_UP: false + volumes: + - grafana_dev_data:/var/lib/grafana + - ./deployment/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro + - ./deployment/grafana/datasources:/etc/grafana/provisioning/datasources:ro + ports: + - "3000:3000" + depends_on: + - prometheus + restart: unless-stopped + +volumes: + postgres_dev_data: + redis_dev_data: + pgadmin_dev_data: + prometheus_dev_data: + grafana_dev_data: + +networks: + default: + name: ai-shopping-concierge-dev \ No newline at end of file diff --git a/docker-compose.production.yml b/docker-compose.production.yml new file mode 100644 index 00000000..6482f087 --- /dev/null +++ b/docker-compose.production.yml @@ -0,0 +1,292 @@ +# AI Shopping Concierge - Production Docker Compose +# This file is for production deployment + +version: '3.8' + +services: + # Main application + ai-shopping-concierge: + image: ghcr.io/your-username/ai-shopping-concierge:latest + ports: + - "8000:8000" + environment: + - ENVIRONMENT=production + - DEBUG=false + - DATABASE_URL=postgresql+asyncpg://postgres:${DB_PASSWORD}@db:5432/ai_shopping_concierge + - REDIS_URL=redis://:${REDIS_PASSWORD}@redis:6379/0 + - GOOGLE_AI_API_KEY=${GOOGLE_AI_API_KEY} + - WHATSAPP_ACCESS_TOKEN=${WHATSAPP_ACCESS_TOKEN} + - WHATSAPP_VERIFY_TOKEN=${WHATSAPP_VERIFY_TOKEN} + - WHATSAPP_PHONE_NUMBER_ID=${WHATSAPP_PHONE_NUMBER_ID} + - AP2_MERCHANT_ID=${AP2_MERCHANT_ID} + - AP2_API_KEY=${AP2_API_KEY} + - STRIPE_API_KEY=${STRIPE_API_KEY} + - PAYPAL_CLIENT_ID=${PAYPAL_CLIENT_ID} + - PAYPAL_CLIENT_SECRET=${PAYPAL_CLIENT_SECRET} + - SECRET_KEY=${SECRET_KEY} + - SENTRY_DSN=${SENTRY_DSN} + volumes: + - ./logs:/app/logs + - ./config:/app/config:ro + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + restart: unless-stopped + deploy: + replicas: 3 + resources: + limits: + cpus: '1.0' + memory: 1G + reservations: + cpus: '0.5' + memory: 512M + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # PostgreSQL database with backup + db: + image: postgres:15-alpine + environment: + POSTGRES_DB: ai_shopping_concierge + POSTGRES_USER: postgres + POSTGRES_PASSWORD: ${DB_PASSWORD} + POSTGRES_INITDB_ARGS: "--encoding=UTF-8" + volumes: + - postgres_data:/var/lib/postgresql/data + - ./backup:/backup + - ./deployment/postgres/postgresql.conf:/etc/postgresql/postgresql.conf:ro + ports: + - "5432:5432" + restart: unless-stopped + deploy: + resources: + limits: + cpus: '2.0' + memory: 2G + reservations: + cpus: '1.0' + memory: 1G + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d ai_shopping_concierge"] + interval: 10s + timeout: 5s + retries: 5 + command: > + postgres + -c max_connections=200 + -c shared_buffers=256MB + -c effective_cache_size=1GB + -c work_mem=4MB + -c maintenance_work_mem=64MB + + # Redis with persistence + redis: + image: redis:7-alpine + command: > + redis-server + --requirepass ${REDIS_PASSWORD} + --appendonly yes + --appendfsync everysec + --maxmemory 1gb + --maxmemory-policy allkeys-lru + --save 900 1 + --save 300 10 + --save 60 10000 + volumes: + - redis_data:/data + ports: + - "6379:6379" + restart: unless-stopped + deploy: + resources: + limits: + cpus: '0.5' + memory: 1G + reservations: + cpus: '0.25' + memory: 512M + healthcheck: + test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "${REDIS_PASSWORD}", "ping"] + interval: 10s + timeout: 3s + retries: 5 + + # Nginx reverse proxy with SSL + nginx: + image: nginx:alpine + ports: + - "80:80" + - "443:443" + volumes: + - ./deployment/nginx/nginx.production.conf:/etc/nginx/nginx.conf:ro + - ./deployment/ssl:/etc/ssl:ro + - ./deployment/nginx/html:/usr/share/nginx/html:ro + depends_on: + - ai-shopping-concierge + restart: unless-stopped + deploy: + resources: + limits: + cpus: '0.5' + memory: 512M + reservations: + cpus: '0.25' + memory: 256M + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost/health"] + interval: 30s + timeout: 10s + retries: 3 + + # Database backup service + db-backup: + image: postgres:15-alpine + environment: + PGPASSWORD: ${DB_PASSWORD} + volumes: + - ./backup:/backup + - ./deployment/scripts/backup.sh:/backup.sh:ro + depends_on: + - db + restart: "no" + deploy: + replicas: 0 + command: > + sh -c " + while true; do + sleep 86400 + /backup.sh + done + " + + # Log aggregator + fluentd: + image: fluent/fluentd:v1.16-debian-1 + volumes: + - ./logs:/fluentd/log + - ./deployment/fluentd/fluent.conf:/fluentd/etc/fluent.conf:ro + ports: + - "24224:24224" + - "24224:24224/udp" + restart: unless-stopped + deploy: + resources: + limits: + cpus: '0.25' + memory: 256M + + # Monitoring - Prometheus + prometheus: + image: prom/prometheus:latest + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + - '--storage.tsdb.retention.time=720h' + - '--web.enable-lifecycle' + - '--web.external-url=https://your-domain.com/prometheus/' + volumes: + - ./deployment/prometheus/prometheus.production.yml:/etc/prometheus/prometheus.yml:ro + - prometheus_data:/prometheus + ports: + - "9090:9090" + restart: unless-stopped + deploy: + resources: + limits: + cpus: '1.0' + memory: 1G + + # Monitoring - Grafana + grafana: + image: grafana/grafana:latest + environment: + GF_SECURITY_ADMIN_USER: ${GRAFANA_ADMIN_USER} + GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD} + GF_USERS_ALLOW_SIGN_UP: false + GF_SERVER_ROOT_URL: https://your-domain.com/grafana/ + GF_SERVER_SERVE_FROM_SUB_PATH: true + volumes: + - grafana_data:/var/lib/grafana + - ./deployment/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro + - ./deployment/grafana/datasources:/etc/grafana/provisioning/datasources:ro + ports: + - "3000:3000" + depends_on: + - prometheus + restart: unless-stopped + deploy: + resources: + limits: + cpus: '0.5' + memory: 512M + + # Security - Fail2ban + fail2ban: + image: lscr.io/linuxserver/fail2ban:latest + environment: + PUID: 1000 + PGID: 1000 + TZ: UTC + volumes: + - fail2ban_data:/config + - ./logs:/logs:ro + - ./deployment/fail2ban:/config/fail2ban:ro + cap_add: + - NET_ADMIN + - NET_RAW + network_mode: host + restart: unless-stopped + +volumes: + postgres_data: + driver: local + driver_opts: + type: none + o: bind + device: /data/postgres + + redis_data: + driver: local + driver_opts: + type: none + o: bind + device: /data/redis + + prometheus_data: + driver: local + driver_opts: + type: none + o: bind + device: /data/prometheus + + grafana_data: + driver: local + driver_opts: + type: none + o: bind + device: /data/grafana + + fail2ban_data: + driver: local + +networks: + default: + name: ai-shopping-concierge-prod + driver: bridge + ipam: + config: + - subnet: 172.20.0.0/16 \ No newline at end of file diff --git a/docs/ai-shopping-concierge/api-reference.md b/docs/ai-shopping-concierge/api-reference.md new file mode 100644 index 00000000..055fe514 --- /dev/null +++ b/docs/ai-shopping-concierge/api-reference.md @@ -0,0 +1,680 @@ +# AI Shopping Concierge - API Reference + +Complete API documentation for the AI Shopping Concierge platform. + +## Base URL + +- **Development**: `http://localhost:8000` +- **Staging**: `https://staging.ai-shopping-concierge.com` +- **Production**: `https://api.ai-shopping-concierge.com` + +## Authentication + +Most endpoints require API key authentication: + +```http +Authorization: Bearer YOUR_API_KEY +``` + +## Core Endpoints + +### Health Check + +Check application health and status. + +**GET** `/health` + +```bash +curl http://localhost:8000/health +``` + +**Response:** +```json +{ + "status": "healthy", + "timestamp": "2025-09-22T10:30:00Z", + "services": { + "database": "connected", + "redis": "connected", + "whatsapp": "active", + "ai_engine": "ready" + }, + "version": "1.0.0" +} +``` + +## WhatsApp Integration + +### Webhook Verification + +**GET** `/webhook/whatsapp` + +Query Parameters: +- `hub.mode`: Subscription mode (subscribe) +- `hub.challenge`: Challenge string to echo back +- `hub.verify_token`: Verification token + +### Message Processing + +**POST** `/webhook/whatsapp` + +Processes incoming WhatsApp messages. + +**Request Body:** +```json +{ + "object": "whatsapp_business_account", + "entry": [{ + "id": "WHATSAPP_BUSINESS_ACCOUNT_ID", + "changes": [{ + "value": { + "messaging_product": "whatsapp", + "metadata": { + "display_phone_number": "PHONE_NUMBER", + "phone_number_id": "PHONE_NUMBER_ID" + }, + "messages": [{ + "from": "CUSTOMER_PHONE_NUMBER", + "id": "MESSAGE_ID", + "timestamp": "TIMESTAMP", + "text": { + "body": "MESSAGE_TEXT" + }, + "type": "text" + }] + }, + "field": "messages" + }] + }] +} +``` + +## Shopping Agent API + +### Start Chat Session + +**POST** `/api/v1/chat/session` + +Create a new shopping session. + +**Request:** +```json +{ + "customer_id": "customer_123", + "channel": "whatsapp", + "phone_number": "+1234567890" +} +``` + +**Response:** +```json +{ + "session_id": "session_abc123", + "status": "active", + "created_at": "2025-09-22T10:30:00Z" +} +``` + +### Send Message + +**POST** `/api/v1/chat/message` + +Send a message to the AI shopping assistant. + +**Request:** +```json +{ + "session_id": "session_abc123", + "message": "I'm looking for a laptop under $1000", + "message_type": "text" +} +``` + +**Response:** +```json +{ + "response": "I'd be happy to help you find a laptop under $1000! I found several great options...", + "products": [ + { + "id": "laptop_001", + "name": "Dell Inspiron 15", + "price": 899.99, + "currency": "USD", + "image_url": "https://...", + "description": "15.6\" laptop with Intel i5 processor" + } + ], + "actions": [ + { + "type": "quick_reply", + "title": "View Details", + "payload": "view_product_laptop_001" + }, + { + "type": "quick_reply", + "title": "Compare Options", + "payload": "compare_laptops" + } + ] +} +``` + +### Get Session History + +**GET** `/api/v1/chat/session/{session_id}/history` + +**Response:** +```json +{ + "session_id": "session_abc123", + "messages": [ + { + "timestamp": "2025-09-22T10:30:00Z", + "sender": "customer", + "message": "I'm looking for a laptop", + "type": "text" + }, + { + "timestamp": "2025-09-22T10:30:05Z", + "sender": "ai", + "message": "I'd be happy to help you find a laptop!", + "type": "text", + "products": [...] + } + ] +} +``` + +## Product Curation API + +### Get Recommendations + +**POST** `/api/v1/curation/recommend` + +Get AI-powered product recommendations. + +**Request:** +```json +{ + "customer_id": "customer_123", + "query": "gaming laptop", + "budget_min": 800, + "budget_max": 1500, + "preferences": { + "brand": ["ASUS", "MSI"], + "screen_size": "15-17 inches", + "use_case": "gaming" + }, + "limit": 10 +} +``` + +**Response:** +```json +{ + "recommendations": [ + { + "product_id": "laptop_gaming_001", + "name": "ASUS ROG Strix G15", + "price": 1299.99, + "currency": "USD", + "confidence_score": 0.95, + "match_reasons": [ + "Perfect for gaming", + "Within budget range", + "Preferred brand" + ], + "specifications": { + "processor": "AMD Ryzen 7", + "graphics": "NVIDIA RTX 3060", + "ram": "16GB", + "storage": "512GB SSD" + } + } + ], + "total_results": 25, + "search_metadata": { + "query_understanding": "Customer looking for high-performance gaming laptop", + "filters_applied": ["price_range", "brand_preference", "category"] + } +} +``` + +### Create Product Bundle + +**POST** `/api/v1/curation/bundle` + +Create smart product bundles. + +**Request:** +```json +{ + "primary_product_id": "laptop_gaming_001", + "customer_profile": { + "interests": ["gaming", "productivity"], + "budget_total": 1800 + }, + "bundle_type": "complementary" +} +``` + +**Response:** +```json +{ + "bundle_id": "bundle_abc123", + "name": "Gaming Setup Bundle", + "products": [ + { + "id": "laptop_gaming_001", + "name": "ASUS ROG Strix G15", + "price": 1299.99, + "role": "primary" + }, + { + "id": "mouse_gaming_001", + "name": "Logitech G Pro X", + "price": 149.99, + "role": "accessory" + }, + { + "id": "headset_gaming_001", + "name": "SteelSeries Arctis 7", + "price": 179.99, + "role": "accessory" + } + ], + "total_price": 1629.97, + "bundle_discount": 50.00, + "final_price": 1579.97, + "savings": 99.99 +} +``` + +## Negotiation Engine API + +### Initiate Negotiation + +**POST** `/api/v1/negotiation/start` + +Start price negotiation for a product or bundle. + +**Request:** +```json +{ + "session_id": "session_abc123", + "product_id": "laptop_gaming_001", + "customer_signal": "price_concern", + "context": "Customer hesitated at checkout due to price" +} +``` + +**Response:** +```json +{ + "negotiation_id": "neg_xyz789", + "status": "active", + "offers": [ + { + "type": "discount", + "description": "10% off for today only", + "original_price": 1299.99, + "discounted_price": 1169.99, + "savings": 130.00, + "expires_at": "2025-09-22T23:59:59Z" + }, + { + "type": "bundle_upgrade", + "description": "Add gaming mouse for just $50 more", + "additional_cost": 50.00, + "additional_value": 149.99, + "savings": 99.99 + } + ] +} +``` + +### Respond to Offer + +**POST** `/api/v1/negotiation/{negotiation_id}/respond` + +Customer response to negotiation offer. + +**Request:** +```json +{ + "response": "accept", + "offer_id": "offer_discount_001" +} +``` + +**Response:** +```json +{ + "status": "accepted", + "final_offer": { + "product_id": "laptop_gaming_001", + "final_price": 1169.99, + "discount_applied": 130.00, + "terms": "Valid until 2025-09-22T23:59:59Z" + }, + "next_step": "proceed_to_checkout" +} +``` + +## Checkout & Payment API + +### Start Checkout + +**POST** `/api/v1/checkout/start` + +Initialize checkout process with automatic payment handling. + +**Request:** +```json +{ + "session_id": "session_abc123", + "items": [ + { + "product_id": "laptop_gaming_001", + "quantity": 1, + "price": 1169.99, + "applied_discounts": ["negotiation_discount"] + } + ], + "customer_details": { + "email": "customer@example.com", + "phone": "+1234567890" + } +} +``` + +**Response:** +```json +{ + "checkout_id": "checkout_def456", + "session_id": "session_abc123", + "status": "initiated", + "currency_detected": "USD", + "customer_location": "US", + "payment_options": [ + { + "method": "ap2", + "display_name": "AP2 Pay", + "fees": "1.5%", + "recommended": true + }, + { + "method": "stripe", + "display_name": "Credit Card", + "fees": "2.9%" + } + ], + "totals": { + "subtotal": 1169.99, + "tax": 87.75, + "total": 1257.74, + "currency": "USD" + } +} +``` + +### Process Payment + +**POST** `/api/v1/checkout/{checkout_id}/payment` + +Process payment with automatic currency conversion. + +**Request:** +```json +{ + "payment_method": "ap2", + "currency_preference": "EUR", + "billing_address": { + "country": "DE", + "postal_code": "10115", + "city": "Berlin" + } +} +``` + +**Response:** +```json +{ + "payment_id": "pay_ghi789", + "status": "processing", + "currency_conversion": { + "original_amount": 1257.74, + "original_currency": "USD", + "converted_amount": 1068.58, + "target_currency": "EUR", + "exchange_rate": 0.85, + "conversion_fee": 5.34 + }, + "estimated_settlement": "2025-09-23T10:30:00Z" +} +``` + +### Check Payment Status + +**GET** `/api/v1/payment/{payment_id}/status` + +**Response:** +```json +{ + "payment_id": "pay_ghi789", + "status": "completed", + "authorization_id": "auth_123", + "capture_id": "cap_456", + "settlement_id": "settle_789", + "timeline": { + "initiated_at": "2025-09-22T10:30:00Z", + "authorized_at": "2025-09-22T10:30:02Z", + "captured_at": "2025-09-22T10:30:05Z", + "settled_at": "2025-09-23T10:30:00Z" + }, + "fees": { + "processing_fee": 18.89, + "currency_conversion_fee": 5.34, + "total_fees": 24.23 + } +} +``` + +## Analytics API + +### Get Performance Metrics + +**GET** `/api/v1/analytics/performance` + +Query Parameters: +- `start_date`: Start date (ISO 8601) +- `end_date`: End date (ISO 8601) +- `granularity`: hour, day, week, month + +**Response:** +```json +{ + "period": { + "start": "2025-09-15T00:00:00Z", + "end": "2025-09-22T00:00:00Z" + }, + "metrics": { + "total_conversations": 1250, + "conversion_rate": 0.23, + "average_order_value": 287.50, + "total_revenue": 82512.50, + "customer_satisfaction": 4.7, + "response_time_avg": 1.2 + }, + "top_products": [ + { + "product_id": "laptop_gaming_001", + "name": "ASUS ROG Strix G15", + "sales_count": 45, + "revenue": 58499.55 + } + ] +} +``` + +### Get Conversation Analytics + +**GET** `/api/v1/analytics/conversations` + +**Response:** +```json +{ + "conversation_metrics": { + "total_messages": 12450, + "avg_messages_per_session": 8.5, + "most_common_intents": [ + { + "intent": "product_search", + "count": 3200, + "percentage": 25.7 + }, + { + "intent": "price_inquiry", + "count": 2100, + "percentage": 16.9 + } + ], + "abandonment_points": [ + { + "stage": "payment_method_selection", + "rate": 0.15 + }, + { + "stage": "shipping_details", + "rate": 0.08 + } + ] + } +} +``` + +## Error Handling + +All endpoints return consistent error responses: + +```json +{ + "error": { + "code": "INVALID_REQUEST", + "message": "The request is missing required parameters", + "details": { + "missing_fields": ["customer_id", "message"] + }, + "request_id": "req_abc123" + } +} +``` + +### Error Codes + +- `INVALID_REQUEST` (400): Malformed request +- `UNAUTHORIZED` (401): Invalid API key +- `FORBIDDEN` (403): Insufficient permissions +- `NOT_FOUND` (404): Resource not found +- `RATE_LIMITED` (429): Too many requests +- `INTERNAL_ERROR` (500): Server error +- `SERVICE_UNAVAILABLE` (503): Service temporarily down + +## Rate Limits + +- **Development**: 100 requests/minute +- **Production**: 1000 requests/minute +- **Webhook**: No limit (verified requests only) + +Rate limit headers: +``` +X-RateLimit-Limit: 1000 +X-RateLimit-Remaining: 999 +X-RateLimit-Reset: 1632312000 +``` + +## SDKs + +### JavaScript/TypeScript + +```bash +npm install ai-shopping-concierge-sdk +``` + +```javascript +import { AIShoppingConcierge } from 'ai-shopping-concierge-sdk'; + +const client = new AIShoppingConcierge({ + apiKey: 'your-api-key', + baseUrl: 'https://api.ai-shopping-concierge.com' +}); + +const session = await client.chat.createSession({ + customerId: 'customer_123', + channel: 'web' +}); + +const response = await client.chat.sendMessage({ + sessionId: session.session_id, + message: 'I need a laptop for work' +}); +``` + +### Python + +```bash +pip install ai-shopping-concierge-sdk +``` + +```python +from ai_shopping_concierge import Client + +client = Client( + api_key='your-api-key', + base_url='https://api.ai-shopping-concierge.com' +) + +session = client.chat.create_session( + customer_id='customer_123', + channel='web' +) + +response = client.chat.send_message( + session_id=session.session_id, + message='I need a laptop for work' +) +``` + +## Webhooks + +### Conversation Events + +Register webhooks to receive real-time conversation events: + +**POST** `/api/v1/webhooks/register` + +```json +{ + "url": "https://your-app.com/webhook/conversations", + "events": ["message.sent", "session.started", "purchase.completed"], + "secret": "your-webhook-secret" +} +``` + +**Webhook Payload:** +```json +{ + "event": "purchase.completed", + "timestamp": "2025-09-22T10:30:00Z", + "data": { + "session_id": "session_abc123", + "customer_id": "customer_123", + "order_id": "order_def456", + "total_amount": 1257.74, + "currency": "USD" + }, + "signature": "sha256=..." +} +``` + +For complete integration examples and advanced use cases, see our [GitHub repository](https://github.com/your-username/ai-shopping-concierge-ap2). \ No newline at end of file diff --git a/docs/ai-shopping-concierge/deployment.md b/docs/ai-shopping-concierge/deployment.md new file mode 100644 index 00000000..6af9a6f4 --- /dev/null +++ b/docs/ai-shopping-concierge/deployment.md @@ -0,0 +1,1042 @@ +# AI Shopping Concierge - Deployment Guide + +Complete guide for deploying the AI Shopping Concierge to production environments. + +## ๐Ÿ—๏ธ Architecture Overview + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Load Balancer โ”‚โ”€โ”€โ”€โ”€โ”‚ Nginx โ”‚โ”€โ”€โ”€โ”€โ”‚ AI Shopping โ”‚ +โ”‚ (CloudFlare) โ”‚ โ”‚ (Reverse โ”‚ โ”‚ Concierge โ”‚ +โ”‚ โ”‚ โ”‚ Proxy) โ”‚ โ”‚ (FastAPI) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ + โ”‚ WhatsApp โ”‚ โ”‚ + โ”‚ Business API โ”‚ โ”‚ + โ”‚ โ”‚ โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ + โ”‚ PostgreSQL โ”‚ โ”‚ Redis โ”‚ โ”‚ + โ”‚ (Database) โ”‚ โ”‚ (Cache) โ”‚ โ”‚ + โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ + โ”‚ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ + โ”‚ Google AI โ”‚ โ”‚ Payment โ”‚ โ”‚ + โ”‚ (Gemini) โ”‚โ”€โ”€โ”€โ”€โ”‚ Processors โ”‚โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ โ”‚ โ”‚ (AP2/Stripe) โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## ๐Ÿš€ Deployment Options + +### Option 1: Docker Compose (Recommended for Small-Medium Scale) + +**Pros:** +- Easy to set up and manage +- Good for single-server deployments +- Built-in service orchestration +- Suitable for up to 10,000 users + +**Cons:** +- Single point of failure +- Limited horizontal scaling +- Manual scaling required + +### Option 2: Kubernetes (Recommended for Large Scale) + +**Pros:** +- Auto-scaling capabilities +- High availability +- Zero-downtime deployments +- Suitable for 100,000+ users + +**Cons:** +- Complex setup +- Requires Kubernetes expertise +- Higher operational overhead + +### Option 3: Cloud Services (Managed) + +**Pros:** +- Fully managed infrastructure +- Auto-scaling +- Built-in monitoring +- No server management + +**Cons:** +- Higher costs +- Platform lock-in +- Less control + +## ๐Ÿณ Docker Deployment + +### Prerequisites + +- Docker Engine 20.10+ +- Docker Compose 2.0+ +- 4GB+ RAM +- 20GB+ disk space + +### 1. Production Configuration + +Create production configuration files: + +**docker-compose.production.yml:** +```yaml +version: '3.8' + +services: + ai-shopping-concierge: + build: . + ports: + - "8000:8000" + environment: + - ENVIRONMENT=production + - DATABASE_URL=postgresql+asyncpg://postgres:${DB_PASSWORD}@db:5432/ai_shopping_concierge + - REDIS_URL=redis://redis:6379/0 + - GOOGLE_AI_API_KEY=${GOOGLE_AI_API_KEY} + - WHATSAPP_ACCESS_TOKEN=${WHATSAPP_ACCESS_TOKEN} + - AP2_MERCHANT_ID=${AP2_MERCHANT_ID} + depends_on: + - db + - redis + volumes: + - ./config:/app/config:ro + - ./logs:/app/logs + restart: unless-stopped + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + db: + image: postgres:15 + environment: + POSTGRES_DB: ai_shopping_concierge + POSTGRES_USER: postgres + POSTGRES_PASSWORD: ${DB_PASSWORD} + volumes: + - postgres_data:/var/lib/postgresql/data + - ./backup:/backup + restart: unless-stopped + ports: + - "5432:5432" + + redis: + image: redis:7-alpine + volumes: + - redis_data:/data + restart: unless-stopped + ports: + - "6379:6379" + command: redis-server --appendonly yes --maxmemory 1gb --maxmemory-policy allkeys-lru + + nginx: + image: nginx:alpine + ports: + - "80:80" + - "443:443" + volumes: + - ./deployment/nginx.production.conf:/etc/nginx/nginx.conf:ro + - ./deployment/ssl:/etc/ssl:ro + depends_on: + - ai-shopping-concierge + restart: unless-stopped + +volumes: + postgres_data: + redis_data: +``` + +**deployment/nginx.production.conf:** +```nginx +events { + worker_connections 1024; +} + +http { + upstream ai_shopping_concierge { + server ai-shopping-concierge:8000; + } + + # Rate limiting + limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s; + + server { + listen 80; + server_name your-domain.com; + return 301 https://$server_name$request_uri; + } + + server { + listen 443 ssl http2; + server_name your-domain.com; + + ssl_certificate /etc/ssl/your-domain.crt; + ssl_certificate_key /etc/ssl/your-domain.key; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers HIGH:!aNULL:!MD5; + + # Security headers + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; + add_header X-Frame-Options DENY always; + add_header X-Content-Type-Options nosniff always; + + # API endpoints + location /api/ { + limit_req zone=api burst=20 nodelay; + proxy_pass http://ai_shopping_concierge; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + # Webhook endpoints (no rate limiting) + location /webhook/ { + proxy_pass http://ai_shopping_concierge; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + # Health check + location /health { + proxy_pass http://ai_shopping_concierge; + access_log off; + } + } +} +``` + +### 2. Environment Variables + +Create `.env.production`: +```bash +# Database +DB_PASSWORD=your_secure_database_password + +# API Keys +GOOGLE_AI_API_KEY=your_google_ai_api_key +WHATSAPP_ACCESS_TOKEN=your_whatsapp_access_token +WHATSAPP_VERIFY_TOKEN=your_whatsapp_verify_token +WHATSAPP_PHONE_NUMBER_ID=your_phone_number_id + +# Payment Processors +AP2_MERCHANT_ID=your_ap2_merchant_id +AP2_API_KEY=your_ap2_api_key +STRIPE_API_KEY=your_stripe_api_key +PAYPAL_CLIENT_ID=your_paypal_client_id +PAYPAL_CLIENT_SECRET=your_paypal_client_secret + +# Security +SECRET_KEY=your_super_secret_key_change_this_in_production + +# Monitoring (optional) +SENTRY_DSN=your_sentry_dsn +DATADOG_API_KEY=your_datadog_api_key +``` + +### 3. SSL Certificates + +Option A: Let's Encrypt (Free) +```bash +# Install certbot +sudo apt install certbot + +# Get certificate +sudo certbot certonly --standalone -d your-domain.com + +# Copy certificates +sudo cp /etc/letsencrypt/live/your-domain.com/fullchain.pem deployment/ssl/your-domain.crt +sudo cp /etc/letsencrypt/live/your-domain.com/privkey.pem deployment/ssl/your-domain.key +``` + +Option B: Commercial Certificate +```bash +# Place your certificates in deployment/ssl/ +cp your-domain.crt deployment/ssl/ +cp your-domain.key deployment/ssl/ +``` + +### 4. Deploy + +```bash +# Build and start services +docker-compose -f docker-compose.production.yml up -d --build + +# Check status +docker-compose -f docker-compose.production.yml ps + +# View logs +docker-compose -f docker-compose.production.yml logs -f ai-shopping-concierge +``` + +## โ˜ธ๏ธ Kubernetes Deployment + +### Prerequisites + +- Kubernetes cluster 1.20+ +- kubectl configured +- Helm 3.0+ (optional) + +### 1. Create Namespace + +```yaml +# namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: ai-shopping-concierge +``` + +### 2. ConfigMap and Secrets + +```yaml +# configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: ai-shopping-concierge-config + namespace: ai-shopping-concierge +data: + app.yaml: | + app: + name: "AI Shopping Concierge" + debug: false + host: "0.0.0.0" + port: 8000 + database: + echo: false + pool_size: 20 + max_overflow: 30 +--- +apiVersion: v1 +kind: Secret +metadata: + name: ai-shopping-concierge-secrets + namespace: ai-shopping-concierge +type: Opaque +stringData: + DATABASE_URL: "postgresql+asyncpg://postgres:password@postgres-service:5432/ai_shopping_concierge" + REDIS_URL: "redis://redis-service:6379/0" + GOOGLE_AI_API_KEY: "your_google_ai_api_key" + WHATSAPP_ACCESS_TOKEN: "your_whatsapp_access_token" + AP2_MERCHANT_ID: "your_ap2_merchant_id" + SECRET_KEY: "your_super_secret_key" +``` + +### 3. Database Deployment + +```yaml +# postgres.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres + namespace: ai-shopping-concierge +spec: + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:15 + env: + - name: POSTGRES_DB + value: ai_shopping_concierge + - name: POSTGRES_USER + value: postgres + - name: POSTGRES_PASSWORD + value: password + ports: + - containerPort: 5432 + volumeMounts: + - name: postgres-storage + mountPath: /var/lib/postgresql/data + volumes: + - name: postgres-storage + persistentVolumeClaim: + claimName: postgres-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres-service + namespace: ai-shopping-concierge +spec: + selector: + app: postgres + ports: + - port: 5432 + targetPort: 5432 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-pvc + namespace: ai-shopping-concierge +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi +``` + +### 4. Application Deployment + +```yaml +# ai-shopping-concierge.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ai-shopping-concierge + namespace: ai-shopping-concierge +spec: + replicas: 3 + selector: + matchLabels: + app: ai-shopping-concierge + template: + metadata: + labels: + app: ai-shopping-concierge + spec: + containers: + - name: ai-shopping-concierge + image: your-registry/ai-shopping-concierge:latest + ports: + - containerPort: 8000 + env: + - name: ENVIRONMENT + value: "production" + envFrom: + - secretRef: + name: ai-shopping-concierge-secrets + - configMapRef: + name: ai-shopping-concierge-config + resources: + requests: + memory: "512Mi" + cpu: "500m" + limits: + memory: "1Gi" + cpu: "1000m" + livenessProbe: + httpGet: + path: /health + port: 8000 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 8000 + initialDelaySeconds: 5 + periodSeconds: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: ai-shopping-concierge-service + namespace: ai-shopping-concierge +spec: + selector: + app: ai-shopping-concierge + ports: + - port: 8000 + targetPort: 8000 + type: ClusterIP +``` + +### 5. Ingress + +```yaml +# ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ai-shopping-concierge-ingress + namespace: ai-shopping-concierge + annotations: + kubernetes.io/ingress.class: "nginx" + cert-manager.io/cluster-issuer: "letsencrypt-prod" + nginx.ingress.kubernetes.io/rate-limit: "100" +spec: + tls: + - hosts: + - your-domain.com + secretName: ai-shopping-concierge-tls + rules: + - host: your-domain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: ai-shopping-concierge-service + port: + number: 8000 +``` + +### 6. Deploy to Kubernetes + +```bash +# Apply configurations +kubectl apply -f namespace.yaml +kubectl apply -f configmap.yaml +kubectl apply -f postgres.yaml +kubectl apply -f redis.yaml +kubectl apply -f ai-shopping-concierge.yaml +kubectl apply -f ingress.yaml + +# Check status +kubectl get pods -n ai-shopping-concierge +kubectl get services -n ai-shopping-concierge +kubectl get ingress -n ai-shopping-concierge + +# View logs +kubectl logs -f deployment/ai-shopping-concierge -n ai-shopping-concierge +``` + +## โ˜๏ธ Cloud Provider Deployments + +### AWS (Amazon Web Services) + +**Services Used:** +- **ECS/Fargate**: Container orchestration +- **RDS**: Managed PostgreSQL +- **ElastiCache**: Managed Redis +- **ALB**: Load balancer +- **Route 53**: DNS +- **CloudWatch**: Monitoring + +**Deployment Script:** +```bash +# AWS CLI deployment script +aws ecs create-cluster --cluster-name ai-shopping-concierge + +# Create task definition +aws ecs register-task-definition --cli-input-json file://task-definition.json + +# Create service +aws ecs create-service \ + --cluster ai-shopping-concierge \ + --service-name ai-shopping-concierge-service \ + --task-definition ai-shopping-concierge:1 \ + --desired-count 3 \ + --load-balancers file://load-balancers.json +``` + +### Google Cloud Platform + +**Services Used:** +- **Cloud Run**: Serverless containers +- **Cloud SQL**: Managed PostgreSQL +- **Memorystore**: Managed Redis +- **Cloud Load Balancing**: Load balancer +- **Cloud DNS**: DNS +- **Cloud Monitoring**: Monitoring + +**Deployment Script:** +```bash +# Build and push container +gcloud builds submit --tag gcr.io/PROJECT_ID/ai-shopping-concierge + +# Deploy to Cloud Run +gcloud run deploy ai-shopping-concierge \ + --image gcr.io/PROJECT_ID/ai-shopping-concierge \ + --platform managed \ + --region us-central1 \ + --allow-unauthenticated \ + --set-env-vars="DATABASE_URL=...,REDIS_URL=..." +``` + +### Microsoft Azure + +**Services Used:** +- **Container Instances**: Container hosting +- **Database for PostgreSQL**: Managed PostgreSQL +- **Cache for Redis**: Managed Redis +- **Application Gateway**: Load balancer +- **DNS Zone**: DNS +- **Monitor**: Monitoring + +## ๐Ÿ”ง Configuration Management + +### Environment-Specific Configs + +**config/production.yaml:** +```yaml +app: + debug: false + workers: 4 + log_level: "INFO" + +database: + pool_size: 20 + max_overflow: 30 + echo: false + +redis: + max_connections: 100 + +rate_limiting: + enabled: true + requests_per_minute: 1000 + +monitoring: + enabled: true + sentry_dsn: "${SENTRY_DSN}" + datadog_api_key: "${DATADOG_API_KEY}" +``` + +### Secrets Management + +**Option 1: Kubernetes Secrets** +```bash +kubectl create secret generic api-keys \ + --from-literal=google-ai-key=your_key \ + --from-literal=whatsapp-token=your_token +``` + +**Option 2: HashiCorp Vault** +```bash +vault kv put secret/ai-shopping-concierge \ + google_ai_key=your_key \ + whatsapp_token=your_token +``` + +**Option 3: Cloud Provider Secret Managers** +```bash +# AWS Secrets Manager +aws secretsmanager create-secret \ + --name ai-shopping-concierge/api-keys \ + --secret-string '{"google_ai_key":"your_key"}' + +# Google Secret Manager +gcloud secrets create google-ai-key --data-file=- +``` + +## ๐Ÿ“Š Monitoring & Observability + +### Health Checks + +The application provides comprehensive health checks: + +```http +GET /health +``` + +**Response:** +```json +{ + "status": "healthy", + "timestamp": "2025-09-22T10:30:00Z", + "services": { + "database": "connected", + "redis": "connected", + "whatsapp": "active", + "ai_engine": "ready" + }, + "version": "1.0.0", + "uptime": "72h35m12s" +} +``` + +### Metrics Collection + +**Prometheus Configuration:** +```yaml +# prometheus.yml +global: + scrape_interval: 15s + +scrape_configs: + - job_name: 'ai-shopping-concierge' + static_configs: + - targets: ['ai-shopping-concierge:8000'] + metrics_path: /metrics +``` + +**Key Metrics:** +- `http_requests_total`: Total HTTP requests +- `http_request_duration_seconds`: Request duration +- `active_conversations`: Active chat sessions +- `conversion_rate`: Purchase conversion rate +- `payment_success_rate`: Payment success rate + +### Logging + +**Structured Logging Configuration:** +```python +LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'json': { + 'format': '%(asctime)s %(name)s %(levelname)s %(message)s', + 'datefmt': '%Y-%m-%dT%H:%M:%S' + } + }, + 'handlers': { + 'file': { + 'level': 'INFO', + 'class': 'logging.handlers.RotatingFileHandler', + 'filename': '/app/logs/app.log', + 'maxBytes': 50 * 1024 * 1024, # 50MB + 'backupCount': 5, + 'formatter': 'json' + } + }, + 'loggers': { + 'ai_shopping_agent': { + 'handlers': ['file'], + 'level': 'INFO', + 'propagate': True + } + } +} +``` + +### Alerting + +**Sample Alerts:** +```yaml +# alerts.yml +groups: + - name: ai-shopping-concierge + rules: + - alert: HighErrorRate + expr: rate(http_requests_total{status=~"5.."}[5m]) > 0.1 + for: 5m + labels: + severity: critical + annotations: + summary: "High error rate detected" + + - alert: DatabaseConnectionDown + expr: up{job="postgres"} == 0 + for: 1m + labels: + severity: critical + annotations: + summary: "Database connection is down" + + - alert: LowConversionRate + expr: conversion_rate < 0.1 + for: 15m + labels: + severity: warning + annotations: + summary: "Conversion rate is below 10%" +``` + +## ๐Ÿ”’ Security + +### SSL/TLS Configuration + +**Nginx SSL Configuration:** +```nginx +ssl_protocols TLSv1.2 TLSv1.3; +ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384; +ssl_prefer_server_ciphers off; +ssl_session_cache shared:SSL:10m; +ssl_session_timeout 10m; +``` + +### Web Application Firewall + +**CloudFlare Rules:** +``` +# Block suspicious patterns +(http.request.uri.path contains "/admin" and ip.src ne YOUR_ADMIN_IP) +(http.request.method eq "POST" and rate(1m) > 30) +(http.user_agent contains "bot" and not http.user_agent contains "whatsapp") +``` + +### API Security + +**Rate Limiting:** +```python +from slowapi import Limiter, _rate_limit_exceeded_handler +from slowapi.util import get_remote_address + +limiter = Limiter(key_func=get_remote_address) + +@app.post("/api/v1/chat/message") +@limiter.limit("10/minute") +async def send_message(request: Request, ...): + pass +``` + +**Input Validation:** +```python +from pydantic import BaseModel, validator + +class MessageRequest(BaseModel): + message: str + session_id: str + + @validator('message') + def validate_message(cls, v): + if len(v) > 1000: + raise ValueError('Message too long') + return v.strip() +``` + +## ๐Ÿš€ Performance Optimization + +### Database Optimization + +**PostgreSQL Configuration:** +```sql +-- Optimize for production workload +ALTER SYSTEM SET shared_buffers = '256MB'; +ALTER SYSTEM SET effective_cache_size = '1GB'; +ALTER SYSTEM SET work_mem = '4MB'; +ALTER SYSTEM SET maintenance_work_mem = '64MB'; +ALTER SYSTEM SET checkpoint_completion_target = 0.9; +ALTER SYSTEM SET wal_buffers = '16MB'; +ALTER SYSTEM SET default_statistics_target = 100; + +-- Create indexes for common queries +CREATE INDEX idx_conversations_customer_id ON conversations(customer_id); +CREATE INDEX idx_messages_session_id ON messages(session_id); +CREATE INDEX idx_orders_created_at ON orders(created_at); +``` + +### Redis Configuration + +```redis +# redis.conf +maxmemory 1gb +maxmemory-policy allkeys-lru +save 900 1 +save 300 10 +save 60 10000 +``` + +### Application Performance + +**Connection Pooling:** +```python +from sqlalchemy.pool import QueuePool + +engine = create_async_engine( + DATABASE_URL, + poolclass=QueuePool, + pool_size=20, + max_overflow=30, + pool_pre_ping=True, + pool_recycle=3600 +) +``` + +**Caching Strategy:** +```python +import redis +from functools import wraps + +redis_client = redis.Redis.from_url(REDIS_URL) + +def cache_result(expiry=300): + def decorator(func): + @wraps(func) + async def wrapper(*args, **kwargs): + cache_key = f"{func.__name__}:{hash(str(args) + str(kwargs))}" + cached = redis_client.get(cache_key) + + if cached: + return json.loads(cached) + + result = await func(*args, **kwargs) + redis_client.setex(cache_key, expiry, json.dumps(result)) + return result + return wrapper + return decorator +``` + +## ๐Ÿ”„ CI/CD Pipeline + +### GitHub Actions + +**.github/workflows/deploy.yml:** +```yaml +name: Deploy AI Shopping Concierge + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Install dependencies + run: | + pip install -r requirements.txt + pip install pytest pytest-asyncio + + - name: Run tests + run: pytest tests/ -v + + - name: Run security scan + run: | + pip install safety bandit + safety check + bandit -r ai_shopping_agent/ + + build: + needs: test + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/main' + + steps: + - uses: actions/checkout@v3 + + - name: Build Docker image + run: | + docker build -t ai-shopping-concierge:${{ github.sha }} . + docker tag ai-shopping-concierge:${{ github.sha }} ai-shopping-concierge:latest + + - name: Push to registry + run: | + echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin + docker push ai-shopping-concierge:${{ github.sha }} + docker push ai-shopping-concierge:latest + + deploy-staging: + needs: build + runs-on: ubuntu-latest + environment: staging + + steps: + - uses: actions/checkout@v3 + + - name: Deploy to staging + run: | + # Deploy to staging environment + ./scripts/automation/deploy.sh staging + + deploy-production: + needs: deploy-staging + runs-on: ubuntu-latest + environment: production + if: github.ref == 'refs/heads/main' + + steps: + - uses: actions/checkout@v3 + + - name: Deploy to production + run: | + # Deploy to production environment + ./scripts/automation/deploy.sh production +``` + +## ๐Ÿ“‹ Post-Deployment Checklist + +### Immediate (0-1 hour) + +- [ ] Verify all services are running +- [ ] Check health endpoints return 200 OK +- [ ] Test WhatsApp webhook receives messages +- [ ] Verify database connectivity +- [ ] Check Redis cache is working +- [ ] Test payment processing with small amounts +- [ ] Verify SSL certificates are valid +- [ ] Check monitoring dashboards show green status + +### Short-term (1-24 hours) + +- [ ] Monitor error rates and response times +- [ ] Check log files for any warnings or errors +- [ ] Verify backup procedures are working +- [ ] Test auto-scaling if configured +- [ ] Check all integrations (WhatsApp, Google AI, payment processors) +- [ ] Verify rate limiting is working +- [ ] Test disaster recovery procedures + +### Long-term (1-7 days) + +- [ ] Monitor conversion rates and user satisfaction +- [ ] Review performance metrics and optimize if needed +- [ ] Check cost optimization opportunities +- [ ] Update documentation based on deployment experience +- [ ] Plan next release and improvements +- [ ] Review security logs and access patterns +- [ ] Optimize resource allocation based on usage patterns + +## ๐Ÿ†˜ Troubleshooting + +### Common Issues + +**Application Won't Start:** +```bash +# Check logs +docker-compose logs ai-shopping-concierge + +# Check environment variables +docker-compose exec ai-shopping-concierge env | grep -E "(DATABASE|REDIS|GOOGLE)" + +# Check dependencies +docker-compose exec ai-shopping-concierge pip list +``` + +**Database Connection Issues:** +```bash +# Test database connectivity +docker-compose exec ai-shopping-concierge python -c " +import asyncpg +import asyncio +async def test(): + conn = await asyncpg.connect('postgresql://...') + print('Connected successfully') + await conn.close() +asyncio.run(test()) +" +``` + +**High Memory Usage:** +```bash +# Check memory usage +docker stats + +# Optimize Python memory +export PYTHONMALLOC=malloc +export MALLOC_TRIM_THRESHOLD_=100000 +``` + +**SSL Certificate Issues:** +```bash +# Check certificate validity +openssl x509 -in deployment/ssl/your-domain.crt -text -noout + +# Test SSL connection +openssl s_client -connect your-domain.com:443 +``` + +For additional support, check the troubleshooting guide or open an issue on GitHub. \ No newline at end of file diff --git a/docs/ai-shopping-concierge/getting-started.md b/docs/ai-shopping-concierge/getting-started.md new file mode 100644 index 00000000..3984e930 --- /dev/null +++ b/docs/ai-shopping-concierge/getting-started.md @@ -0,0 +1,308 @@ +# AI Shopping Concierge - Getting Started Guide + +Welcome to the AI Shopping Concierge! This guide will help you get up and running with your intelligent shopping assistant built on the AP2 protocol. + +## ๐Ÿš€ Quick Start + +### Prerequisites + +- Python 3.10 or higher +- Git +- Docker (optional, for containerized deployment) +- WhatsApp Business API account +- Google AI API key + +### 1. Repository Setup + +First, set up your repositories using our automated scripts: + +```bash +# Clone the AP2 repository +git clone https://github.com/google-agentic-commerce/AP2.git +cd AP2 + +# Run the repository setup script +./scripts/repository-setup/1-fork-and-setup.sh your-github-username +``` + +Follow the manual steps to: +1. Fork the AP2 repository on GitHub +2. Create your AI Shopping Concierge repository +3. Set up authentication (SSH keys or tokens) + +Then continue with: + +```bash +# Complete the setup +./scripts/repository-setup/2-sync-and-verify.sh your-github-username +./scripts/repository-setup/3-migrate-code.sh +``` + +### 2. Configuration + +Configure your API keys and settings: + +```bash +cd ../ai-shopping-concierge-ap2 +cp config/secrets.yaml.example config/secrets.yaml +``` + +Edit `config/secrets.yaml` with your actual API keys: + +```yaml +# WhatsApp Business API +WHATSAPP_VERIFY_TOKEN: "your_verify_token_here" +WHATSAPP_ACCESS_TOKEN: "your_access_token_here" +WHATSAPP_PHONE_NUMBER_ID: "your_phone_number_id_here" + +# Google AI +GOOGLE_AI_API_KEY: "your_google_ai_api_key_here" + +# Database (for production) +DATABASE_URL: "postgresql+asyncpg://user:password@localhost/ai_shopping_concierge" + +# Payment processors +AP2_MERCHANT_ID: "your_ap2_merchant_id" +STRIPE_API_KEY: "sk_test_your_stripe_key" +``` + +### 3. Installation + +Install dependencies: + +```bash +pip install -r requirements.txt +``` + +### 4. Run the Application + +Start your AI Shopping Concierge: + +```bash +python -m ai_shopping_agent +``` + +You should see: + +``` +๐Ÿš€ Starting AI Shopping Concierge... +โœ… AI Shopping Concierge started successfully! +๐Ÿ’ฌ WhatsApp integration ready +๐Ÿค– AI curation engine ready +๐Ÿ’ฐ Negotiation engine ready +๐Ÿ’ณ Checkout optimizer ready +๐Ÿ“Š Analytics engine ready +``` + +## ๐Ÿ”ง Development Setup + +### Using Docker (Recommended) + +For a complete development environment: + +```bash +# Start all services +docker-compose up --build + +# Or for background running +docker-compose up -d --build +``` + +This starts: +- AI Shopping Concierge application (port 8000) +- PostgreSQL database (port 5432) +- Redis cache (port 6379) +- Nginx proxy (ports 80/443) + +### Manual Setup + +If you prefer manual setup: + +1. **Database Setup**: + ```bash + # Install PostgreSQL + # Create database + createdb ai_shopping_concierge + ``` + +2. **Redis Setup**: + ```bash + # Install Redis + redis-server + ``` + +3. **Environment Variables**: + ```bash + export DATABASE_URL="postgresql+asyncpg://user:password@localhost/ai_shopping_concierge" + export REDIS_URL="redis://localhost:6379/0" + ``` + +## ๐Ÿ“ฑ WhatsApp Integration + +### 1. Set up WhatsApp Business API + +1. Go to [Meta for Developers](https://developers.facebook.com/) +2. Create a new app and add WhatsApp product +3. Get your access token and phone number ID +4. Configure webhook URL: `https://your-domain.com/webhook/whatsapp` + +### 2. Configure Webhook + +Your AI Shopping Concierge automatically handles WhatsApp webhooks at: +- **GET** `/webhook/whatsapp` - Webhook verification +- **POST** `/webhook/whatsapp` - Message processing + +### 3. Test Integration + +Send a message to your WhatsApp Business number: +``` +Hi! I'm looking for a laptop +``` + +The AI should respond with product recommendations and start a conversation. + +## ๐Ÿค– AI Features + +### Product Curation + +The AI automatically: +- Analyzes customer messages for intent and preferences +- Searches product catalogs for relevant items +- Provides personalized recommendations +- Learns from customer interactions + +### Smart Negotiation + +- Detects price sensitivity signals +- Offers dynamic discounts and bundles +- Creates urgency with limited-time offers +- Suggests alternatives and upgrades + +### Payment Processing + +- Auto-detects customer currency from location +- Converts prices in real-time +- Supports multiple payment methods (AP2, Stripe, PayPal) +- Handles international transactions with low fees + +## ๐Ÿ”’ Security + +### API Key Management + +- Store sensitive keys in `config/secrets.yaml` (never commit this file) +- Use environment variables in production +- Rotate keys regularly + +### Webhook Security + +WhatsApp webhooks are automatically verified using your verify token. + +### Payment Security + +All payment processing uses: +- TLS encryption for data in transit +- Tokenized payment methods +- PCI DSS compliant processors +- Real-time fraud detection + +## ๐Ÿ“Š Monitoring + +### Health Checks + +Check application health: +```bash +curl http://localhost:8000/health +``` + +### Analytics Dashboard + +View analytics at: `http://localhost:8000/analytics` + +- Conversion rates +- Popular products +- Customer satisfaction +- Revenue metrics + +### Logs + +Application logs are available in: +- Console output (development) +- `logs/app.log` (production) +- Docker logs: `docker-compose logs ai-shopping-concierge` + +## ๐Ÿš€ Deployment + +### Staging Deployment + +```bash +./scripts/automation/deploy.sh staging +``` + +### Production Deployment + +```bash +./scripts/automation/deploy.sh production +``` + +## ๐Ÿ”„ Maintenance + +### Keep AP2 Core Updated + +```bash +./scripts/automation/sync-upstream.sh +``` + +### Regular Maintenance + +```bash +./scripts/automation/maintenance.sh +``` + +## ๐Ÿ†˜ Troubleshooting + +### Common Issues + +**WhatsApp messages not received:** +- Check webhook URL configuration +- Verify verify token matches +- Check firewall/port settings + +**AI not responding:** +- Verify Google AI API key +- Check API quotas and limits +- Review error logs + +**Payment failures:** +- Check payment processor API keys +- Verify merchant account status +- Review transaction logs + +**Database connection issues:** +- Check DATABASE_URL format +- Verify database server is running +- Check network connectivity + +### Getting Help + +1. Check the [API Reference](api-reference.md) +2. Review [troubleshooting guide](troubleshooting.md) +3. Check logs for error messages +4. Open an issue on GitHub + +## ๐ŸŽฏ Next Steps + +1. **Customize Product Catalog**: Add your products to the AI curation engine +2. **Configure Payment Methods**: Set up your preferred payment processors +3. **Brand the Experience**: Customize messages and UI to match your brand +4. **Scale Infrastructure**: Set up monitoring and auto-scaling for production +5. **Train the AI**: Improve recommendations with your specific product data + +## ๐Ÿ“š Additional Resources + +- [API Reference](api-reference.md) +- [Deployment Guide](deployment.md) +- [Troubleshooting](troubleshooting.md) +- [AP2 Protocol Documentation](https://github.com/google-agentic-commerce/AP2) +- [WhatsApp Business API Docs](https://developers.facebook.com/docs/whatsapp/) + +Welcome to the future of AI-powered commerce! ๐Ÿ›๏ธ๐Ÿค– \ No newline at end of file diff --git a/product-layer/README.md b/product-layer/README.md new file mode 100644 index 00000000..1749d2ef --- /dev/null +++ b/product-layer/README.md @@ -0,0 +1,72 @@ +# AI Shopping Concierge - Product Layer Structure + +This directory contains the organized code structure for your AI Shopping Concierge product layer. This is a preview of how your code will be organized in your separate product repository. + +## Directory Structure + +``` +product-layer/ +โ”œโ”€โ”€ ai-shopping-agent/ # Main application modules +โ”‚ โ”œโ”€โ”€ whatsapp-integration/ # WhatsApp Business API integration +โ”‚ โ”‚ โ”œโ”€โ”€ whatsapp_integration.py # Main WhatsApp agent +โ”‚ โ”‚ โ””โ”€โ”€ unified_chat_manager.py # Multi-channel chat management +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ ai-curation/ # AI-powered product curation +โ”‚ โ”‚ โ””โ”€โ”€ smart_curation_engine.py # Product recommendations and personalization +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ negotiation-engine/ # Smart negotiation and bundling +โ”‚ โ”‚ โ””โ”€โ”€ negotiation_engine.py # Dynamic pricing and deal optimization +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ checkout-optimizer/ # Enhanced checkout processing +โ”‚ โ”‚ โ””โ”€โ”€ checkout_optimizer.py # Payment processing with currency conversion +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ analytics/ # Performance analytics +โ”‚ โ”‚ โ””โ”€โ”€ performance_analytics.py # Insights and tracking +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ common/ # Shared utilities +โ”‚ โ”‚ โ””โ”€โ”€ (various utility modules) +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ __init__.py # Package initialization +โ”‚ โ””โ”€โ”€ __main__.py # Application entry point +โ”‚ +โ”œโ”€โ”€ config/ # Configuration files +โ”‚ โ”œโ”€โ”€ app.yaml # Application configuration +โ”‚ โ””โ”€โ”€ secrets.yaml.example # Secret configuration template +โ”‚ +โ”œโ”€โ”€ deployment/ # Deployment configurations +โ”‚ โ”œโ”€โ”€ docker-compose.yml # Docker deployment +โ”‚ โ”œโ”€โ”€ Dockerfile # Container definition +โ”‚ โ””โ”€โ”€ kubernetes/ # Kubernetes manifests +โ”‚ +โ”œโ”€โ”€ docs/ # Documentation +โ”‚ โ”œโ”€โ”€ getting-started.md # Quick start guide +โ”‚ โ”œโ”€โ”€ api-reference.md # API documentation +โ”‚ โ””โ”€โ”€ deployment.md # Deployment guide +โ”‚ +โ”œโ”€โ”€ examples/ # Usage examples +โ”‚ โ””โ”€โ”€ basic-setup.py # Basic setup example +โ”‚ +โ”œโ”€โ”€ tests/ # Test suite +โ”‚ โ””โ”€โ”€ test_*.py # Unit and integration tests +โ”‚ +โ””โ”€โ”€ requirements.txt # Python dependencies +``` + +## Key Design Principles + +1. **Separation of Concerns**: Clear separation between AP2 core and product features +2. **Modularity**: Each feature in its own module for easy maintenance +3. **Extensibility**: Easy to add new channels, payment methods, or AI models +4. **Production Ready**: Includes deployment, monitoring, and testing infrastructure + +## Next Steps + +After running the repository setup scripts, this structure will be created in your separate product repository with: + +- All enhanced modules from the AP2 samples +- Proper configuration management +- Docker and deployment setup +- Comprehensive documentation +- CI/CD pipeline configuration + +This ensures clean separation between the core AP2 protocol and your innovative product layer! \ No newline at end of file diff --git a/product-layer/__init__.py b/product-layer/__init__.py new file mode 100644 index 00000000..0f5ba3b5 --- /dev/null +++ b/product-layer/__init__.py @@ -0,0 +1,51 @@ +""" +AI Shopping Concierge - Product Layer +==================================== + +This directory contains the AI Shopping Concierge product layer built on top of the AP2 protocol. + +Directory Structure: +------------------- +ai-shopping-agent/ # Core shopping agent modules +โ”œโ”€โ”€ whatsapp-integration/ # WhatsApp Business API integration +โ”œโ”€โ”€ ai-curation/ # AI-powered product curation +โ”œโ”€โ”€ negotiation-engine/ # Smart negotiation and bundling +โ”œโ”€โ”€ checkout-optimizer/ # Enhanced checkout with payment processing +โ”œโ”€โ”€ analytics/ # Performance analytics and insights +โ”œโ”€โ”€ common/ # Shared utilities and base classes +โ””โ”€โ”€ __main__.py # Application entry point + +Key Features: +------------ +๐Ÿค– AI-Powered Curation: Smart product recommendations using Google AI +๐Ÿ’ฌ Multi-Channel Chat: WhatsApp and web chat integration +๐Ÿ’ฐ Dynamic Negotiation: AI-driven pricing and bundle optimization +๐Ÿ’ณ Payment Processing: Automated payment with currency conversion +๐Ÿ“Š Advanced Analytics: Real-time insights and performance tracking + +Integration with AP2: +------------------- +The product layer uses AP2 as a submodule for: +- Core payment infrastructure +- Mandate management +- Transaction security +- Protocol compliance + +This ensures we stay synced with upstream AP2 improvements while building +innovative product features on top. + +Usage: +------ +1. Set up your product repository (see scripts/repository-setup/) +2. Configure API keys in config/secrets.yaml +3. Run: python -m ai_shopping_agent +4. Your AI Shopping Concierge will be ready! + +For detailed setup instructions, see the repository setup scripts in: +scripts/repository-setup/ +""" + +# Version information +__version__ = "1.0.0" +__product__ = "AI Shopping Concierge" +__protocol__ = "AP2" \ No newline at end of file diff --git a/samples/python/src/ai_curation/negotiation_engine.py b/samples/python/src/ai_curation/negotiation_engine.py new file mode 100644 index 00000000..d8078552 --- /dev/null +++ b/samples/python/src/ai_curation/negotiation_engine.py @@ -0,0 +1,736 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""AI-Powered Negotiation and Dynamic Pricing Engine. + +This module provides intelligent negotiation capabilities, dynamic pricing, +and advanced bundling strategies to maximize conversions and AOV. +""" + +import json +import logging +import random +from datetime import datetime, timezone, timedelta +from typing import Any, Dict, List, Optional, Tuple +from dataclasses import dataclass +from enum import Enum + +import numpy as np +from pydantic import BaseModel, Field +from google import genai + +logger = logging.getLogger(__name__) + + +class NegotiationStrategy(Enum): + """Different negotiation strategies.""" + + AGGRESSIVE = "aggressive" # High discounts to close quickly + BALANCED = "balanced" # Moderate discounts with conditions + CONSERVATIVE = "conservative" # Small discounts, focus on value + PREMIUM = "premium" # No discounts, emphasize quality + + +class NegotiationStage(Enum): + """Stages of negotiation process.""" + + INITIAL_INTEREST = "initial_interest" + PRICE_OBJECTION = "price_objection" + COMPARISON_SHOPPING = "comparison_shopping" + BUNDLE_CONSIDERATION = "bundle_consideration" + FINAL_DECISION = "final_decision" + ABANDONED = "abandoned" + CLOSED = "closed" + + +@dataclass +class CustomerProfile: + """Customer profile for negotiation.""" + + customer_id: str + price_sensitivity: float # 0-1, higher = more price sensitive + loyalty_score: float # 0-1, higher = more loyal + purchase_history_value: float + negotiation_history: List[Dict[str, Any]] + preferred_communication_style: str # "direct", "consultative", "friendly" + urgency_level: float # 0-1, higher = more urgent + + def get_negotiation_strategy(self) -> NegotiationStrategy: + """Determine best negotiation strategy for this customer.""" + + if self.price_sensitivity > 0.8 and self.urgency_level > 0.7: + return NegotiationStrategy.AGGRESSIVE + elif self.loyalty_score > 0.7 and self.purchase_history_value > 1000: + return NegotiationStrategy.PREMIUM + elif self.price_sensitivity > 0.6: + return NegotiationStrategy.BALANCED + else: + return NegotiationStrategy.CONSERVATIVE + + +class NegotiationOffer(BaseModel): + """Represents a negotiation offer.""" + + offer_id: str + original_price: float + offered_price: float + discount_amount: float + discount_percentage: float + conditions: List[str] = [] + valid_until: datetime + reasoning: str + confidence_score: float = Field(ge=0.0, le=1.0) + bundle_items: Optional[List[Dict[str, Any]]] = None + + +class PricingRule(BaseModel): + """Dynamic pricing rule.""" + + rule_id: str + condition: str + action: str + priority: int + active: bool = True + + # Conditions + min_cart_value: Optional[float] = None + customer_segment: Optional[str] = None + time_constraint: Optional[str] = None # "hours_left", "days_left" + inventory_level: Optional[str] = None # "low", "medium", "high" + + # Actions + discount_percentage: Optional[float] = None + fixed_discount: Optional[float] = None + free_shipping: bool = False + bonus_items: Optional[List[str]] = None + + +class NegotiationEngine: + """AI-powered negotiation and pricing engine.""" + + def __init__(self): + self.llm_client = genai.Client() + self.active_negotiations: Dict[str, Dict[str, Any]] = {} + self.pricing_rules: List[PricingRule] = self._initialize_pricing_rules() + self.customer_profiles: Dict[str, CustomerProfile] = {} + + def _initialize_pricing_rules(self) -> List[PricingRule]: + """Initialize default dynamic pricing rules.""" + + return [ + PricingRule( + rule_id="cart_value_discount", + condition="cart_value >= min_cart_value", + action="apply_percentage_discount", + priority=1, + min_cart_value=200, + discount_percentage=10, + free_shipping=True + ), + PricingRule( + rule_id="new_customer_welcome", + condition="customer_segment == 'new'", + action="apply_percentage_discount", + priority=2, + customer_segment="new", + discount_percentage=15 + ), + PricingRule( + rule_id="loyalty_reward", + condition="customer_segment == 'vip'", + action="apply_fixed_discount", + priority=1, + customer_segment="vip", + fixed_discount=50, + bonus_items=["free_gift_wrap"] + ), + PricingRule( + rule_id="urgency_discount", + condition="time_constraint == 'hours_left'", + action="apply_percentage_discount", + priority=3, + time_constraint="hours_left", + discount_percentage=20 + ), + PricingRule( + rule_id="low_inventory_push", + condition="inventory_level == 'low'", + action="apply_percentage_discount", + priority=2, + inventory_level="low", + discount_percentage=12 + ) + ] + + def get_customer_profile(self, customer_id: str) -> CustomerProfile: + """Get or create customer profile.""" + + if customer_id not in self.customer_profiles: + # Create new profile with default values + self.customer_profiles[customer_id] = CustomerProfile( + customer_id=customer_id, + price_sensitivity=0.5, # Will learn over time + loyalty_score=0.0, + purchase_history_value=0.0, + negotiation_history=[], + preferred_communication_style="friendly", + urgency_level=0.3 + ) + + return self.customer_profiles[customer_id] + + async def start_negotiation( + self, + customer_id: str, + product: Dict[str, Any], + customer_message: str + ) -> NegotiationOffer: + """Start a new negotiation session.""" + + profile = self.get_customer_profile(customer_id) + strategy = profile.get_negotiation_strategy() + + # Analyze customer intent + intent_analysis = await self._analyze_negotiation_intent(customer_message) + + # Generate initial offer + offer = self._generate_initial_offer( + product, + profile, + strategy, + intent_analysis + ) + + # Track negotiation + negotiation_id = f"{customer_id}_{product.get('id', 'unknown')}_{int(datetime.now().timestamp())}" + self.active_negotiations[negotiation_id] = { + "customer_id": customer_id, + "product": product, + "stage": NegotiationStage.INITIAL_INTEREST, + "offers": [offer], + "customer_messages": [customer_message], + "start_time": datetime.now(timezone.utc) + } + + return offer + + async def _analyze_negotiation_intent(self, message: str) -> Dict[str, Any]: + """Analyze customer message for negotiation intent.""" + + prompt = f""" + Analyze this customer message for negotiation intent: "{message}" + + Return JSON with: + - intent_type: "price_inquiry", "discount_request", "comparison", "bundle_interest", "value_concern" + - urgency_level: 0.0-1.0 (how urgent they seem) + - price_sensitivity: 0.0-1.0 (how price-focused they are) + - negotiation_openness: 0.0-1.0 (how open to negotiation) + - specific_concerns: list of specific concerns mentioned + - budget_mentioned: true/false if they mentioned a budget + - competitor_mentioned: true/false if they mentioned competitors + """ + + try: + response = self.llm_client.models.generate_content( + model="gemini-2.5-flash", + contents=prompt, + config={ + "response_mime_type": "application/json", + "response_schema": {"type": "object"} + } + ) + + return response.parsed if response.parsed else {} + + except Exception as e: + logger.error(f"Error analyzing negotiation intent: {e}") + return { + "intent_type": "price_inquiry", + "urgency_level": 0.5, + "price_sensitivity": 0.5, + "negotiation_openness": 0.5, + "specific_concerns": [], + "budget_mentioned": False, + "competitor_mentioned": False + } + + def _generate_initial_offer( + self, + product: Dict[str, Any], + profile: CustomerProfile, + strategy: NegotiationStrategy, + intent_analysis: Dict[str, Any] + ) -> NegotiationOffer: + """Generate initial negotiation offer.""" + + original_price = product.get("price", 100) + + # Base discount based on strategy + strategy_discounts = { + NegotiationStrategy.AGGRESSIVE: (0.15, 0.25), # 15-25% + NegotiationStrategy.BALANCED: (0.08, 0.15), # 8-15% + NegotiationStrategy.CONSERVATIVE: (0.03, 0.08), # 3-8% + NegotiationStrategy.PREMIUM: (0.0, 0.03) # 0-3% + } + + min_discount, max_discount = strategy_discounts[strategy] + + # Adjust based on customer analysis + urgency_boost = intent_analysis.get("urgency_level", 0.5) * 0.05 + sensitivity_boost = intent_analysis.get("price_sensitivity", 0.5) * 0.08 + + final_discount = min_discount + (max_discount - min_discount) * 0.7 + final_discount += urgency_boost + sensitivity_boost + final_discount = max(0.0, min(0.3, final_discount)) # Cap at 30% + + # Apply dynamic pricing rules + rule_discount = self._apply_pricing_rules(profile, original_price) + final_discount = max(final_discount, rule_discount) + + # Calculate offer + discount_amount = original_price * final_discount + offered_price = original_price - discount_amount + + # Generate conditions and reasoning + conditions = self._generate_offer_conditions(strategy, intent_analysis) + reasoning = self._generate_offer_reasoning(strategy, final_discount, conditions) + + return NegotiationOffer( + offer_id=f"offer_{int(datetime.now().timestamp())}", + original_price=original_price, + offered_price=offered_price, + discount_amount=discount_amount, + discount_percentage=final_discount * 100, + conditions=conditions, + valid_until=datetime.now(timezone.utc) + timedelta(hours=24), + reasoning=reasoning, + confidence_score=0.8 + ) + + def _apply_pricing_rules(self, profile: CustomerProfile, cart_value: float) -> float: + """Apply dynamic pricing rules.""" + + max_discount = 0.0 + + for rule in sorted(self.pricing_rules, key=lambda x: x.priority): + if not rule.active: + continue + + discount = 0.0 + + # Check conditions + if rule.min_cart_value and cart_value < rule.min_cart_value: + continue + + if rule.customer_segment: + customer_segment = self._determine_customer_segment(profile) + if customer_segment != rule.customer_segment: + continue + + # Apply discount + if rule.discount_percentage: + discount = rule.discount_percentage / 100 + elif rule.fixed_discount: + discount = rule.fixed_discount / cart_value + + max_discount = max(max_discount, discount) + + return max_discount + + def _determine_customer_segment(self, profile: CustomerProfile) -> str: + """Determine customer segment.""" + + if profile.purchase_history_value > 2000 and profile.loyalty_score > 0.8: + return "vip" + elif profile.purchase_history_value == 0: + return "new" + elif profile.purchase_history_value > 500: + return "regular" + else: + return "occasional" + + def _generate_offer_conditions( + self, + strategy: NegotiationStrategy, + intent_analysis: Dict[str, Any] + ) -> List[str]: + """Generate conditions for the offer.""" + + conditions = [] + + if strategy == NegotiationStrategy.AGGRESSIVE: + conditions.extend([ + "Limited time offer - expires in 24 hours", + "While supplies last" + ]) + elif strategy == NegotiationStrategy.BALANCED: + conditions.extend([ + "Add to cart within 2 hours to secure this price", + "Free shipping included" + ]) + elif strategy == NegotiationStrategy.CONSERVATIVE: + conditions.extend([ + "Valid for 48 hours", + "Minimum purchase of $100" + ]) + + # Add conditions based on intent + if intent_analysis.get("comparison_shopping"): + conditions.append("Price match guarantee included") + + if intent_analysis.get("urgency_level", 0) > 0.7: + conditions.append("Express shipping available") + + return conditions + + def _generate_offer_reasoning( + self, + strategy: NegotiationStrategy, + discount: float, + conditions: List[str] + ) -> str: + """Generate reasoning for the offer.""" + + discount_percent = discount * 100 + + if strategy == NegotiationStrategy.AGGRESSIVE: + return (f"I can offer you an exclusive {discount_percent:.0f}% discount " + f"because we're clearing inventory this week. This is our best price!") + elif strategy == NegotiationStrategy.BALANCED: + return (f"I'd like to offer you {discount_percent:.0f}% off this great product. " + f"It's a fantastic deal that includes free shipping.") + elif strategy == NegotiationStrategy.CONSERVATIVE: + return (f"I can provide a {discount_percent:.0f}% valued customer discount " + f"on this premium item. The quality speaks for itself.") + else: # PREMIUM + return ("This is a premium product at its regular price. " + "The value comes from the exceptional quality and service.") + + async def create_smart_bundle( + self, + primary_product: Dict[str, Any], + related_products: List[Dict[str, Any]], + customer_id: str + ) -> Dict[str, Any]: + """Create an intelligent product bundle.""" + + profile = self.get_customer_profile(customer_id) + + # Select complementary products using AI + bundle_products = await self._select_bundle_products( + primary_product, related_products, profile + ) + + # Calculate bundle pricing + total_original_price = sum(p.get("price", 0) for p in bundle_products) + + # Dynamic bundle discount based on customer profile + base_bundle_discount = 0.12 # 12% base bundle discount + loyalty_bonus = profile.loyalty_score * 0.05 + volume_bonus = min(len(bundle_products) * 0.02, 0.08) + + total_discount = base_bundle_discount + loyalty_bonus + volume_bonus + total_discount = min(total_discount, 0.25) # Cap at 25% + + bundle_price = total_original_price * (1 - total_discount) + savings = total_original_price - bundle_price + + # Generate bundle description + bundle_description = await self._generate_bundle_description(bundle_products) + + return { + "type": "smart_bundle", + "id": f"bundle_{int(datetime.now().timestamp())}", + "name": bundle_description["name"], + "description": bundle_description["description"], + "products": bundle_products, + "original_price": total_original_price, + "bundle_price": bundle_price, + "savings": savings, + "discount_percentage": total_discount * 100, + "value_proposition": bundle_description["value_proposition"], + "valid_until": datetime.now(timezone.utc) + timedelta(hours=48), + "personalization_score": self._calculate_bundle_score(bundle_products, profile) + } + + async def _select_bundle_products( + self, + primary_product: Dict[str, Any], + candidates: List[Dict[str, Any]], + profile: CustomerProfile + ) -> List[Dict[str, Any]]: + """Select best products for bundle using AI.""" + + # Start with primary product + bundle_products = [primary_product] + + if not candidates: + return bundle_products + + # Use LLM to select complementary products + prompt = f""" + Given this primary product: {json.dumps(primary_product, indent=2)} + + And these candidate products: {json.dumps(candidates[:10], indent=2)} + + Select 1-3 products that would create the most compelling bundle for a customer. + Consider: + - Complementary functionality + - Price balance (not all expensive items) + - Logical grouping + - Customer value + + Return JSON array of selected product indices (0-based). + """ + + try: + response = self.llm_client.models.generate_content( + model="gemini-2.5-flash", + contents=prompt, + config={ + "response_mime_type": "application/json", + "response_schema": {"type": "array", "items": {"type": "integer"}} + } + ) + + selected_indices = response.parsed if response.parsed else [0] + + # Add selected products to bundle + for idx in selected_indices: + if 0 <= idx < len(candidates): + bundle_products.append(candidates[idx]) + + except Exception as e: + logger.error(f"Error selecting bundle products: {e}") + # Fallback: add first candidate + if candidates: + bundle_products.append(candidates[0]) + + return bundle_products + + async def _generate_bundle_description( + self, + products: List[Dict[str, Any]] + ) -> Dict[str, str]: + """Generate compelling bundle name and description.""" + + prompt = f""" + Create a compelling bundle name and description for these products: + {json.dumps(products, indent=2)} + + Return JSON with: + - name: Catchy bundle name (max 50 chars) + - description: Brief description explaining why these go together (max 100 chars) + - value_proposition: Why this bundle is a great deal (max 80 chars) + """ + + try: + response = self.llm_client.models.generate_content( + model="gemini-2.5-flash", + contents=prompt, + config={ + "response_mime_type": "application/json", + "response_schema": {"type": "object"} + } + ) + + return response.parsed if response.parsed else { + "name": "Perfect Bundle", + "description": "Everything you need in one package", + "value_proposition": "Save money and get complete solution" + } + + except Exception as e: + logger.error(f"Error generating bundle description: {e}") + return { + "name": "Special Bundle", + "description": "Carefully selected products that work great together", + "value_proposition": "Better value when purchased together" + } + + def _calculate_bundle_score( + self, + products: List[Dict[str, Any]], + profile: CustomerProfile + ) -> float: + """Calculate bundle appeal score for customer.""" + + # Factor in customer preferences, price sensitivity, etc. + base_score = 0.7 + + # Adjust for price sensitivity + total_price = sum(p.get("price", 0) for p in products) + if profile.price_sensitivity > 0.7 and total_price > 300: + base_score -= 0.2 + elif profile.price_sensitivity < 0.3: + base_score += 0.1 + + # Adjust for product variety + categories = set(p.get("category", "other") for p in products) + if len(categories) > 1: + base_score += 0.1 + + return max(0.0, min(1.0, base_score)) + + def handle_counter_offer( + self, + negotiation_id: str, + customer_message: str + ) -> NegotiationOffer: + """Handle customer counter-offer or objection.""" + + if negotiation_id not in self.active_negotiations: + raise ValueError("Negotiation not found") + + negotiation = self.active_negotiations[negotiation_id] + profile = self.get_customer_profile(negotiation["customer_id"]) + + # Analyze counter-offer + intent = self._analyze_counter_offer(customer_message) + + # Update negotiation stage + negotiation["stage"] = self._determine_next_stage(intent, negotiation["stage"]) + negotiation["customer_messages"].append(customer_message) + + # Generate counter-offer + counter_offer = self._generate_counter_offer(negotiation, profile, intent) + negotiation["offers"].append(counter_offer) + + return counter_offer + + def _analyze_counter_offer(self, message: str) -> Dict[str, Any]: + """Analyze customer counter-offer.""" + + # Simple analysis - in production, use more sophisticated NLP + message_lower = message.lower() + + intent = { + "type": "general", + "price_mentioned": any(word in message_lower for word in ["price", "cost", "expensive", "cheap", "budget"]), + "comparison_mentioned": any(word in message_lower for word in ["competitor", "amazon", "elsewhere", "found"]), + "value_concern": any(word in message_lower for word in ["worth", "value", "quality", "features"]), + "urgency": any(word in message_lower for word in ["urgent", "need", "soon", "today", "now"]), + "interest_level": 0.5 + } + + # Estimate interest level + positive_words = ["interested", "like", "good", "great", "yes", "okay"] + negative_words = ["expensive", "much", "high", "no", "not", "can't"] + + positive_count = sum(1 for word in positive_words if word in message_lower) + negative_count = sum(1 for word in negative_words if word in message_lower) + + if positive_count > negative_count: + intent["interest_level"] = 0.7 + elif negative_count > positive_count: + intent["interest_level"] = 0.3 + + return intent + + def _determine_next_stage( + self, + intent: Dict[str, Any], + current_stage: NegotiationStage + ) -> NegotiationStage: + """Determine next negotiation stage.""" + + if intent["price_mentioned"]: + return NegotiationStage.PRICE_OBJECTION + elif intent["comparison_mentioned"]: + return NegotiationStage.COMPARISON_SHOPPING + elif intent["interest_level"] > 0.6: + return NegotiationStage.FINAL_DECISION + elif intent["interest_level"] < 0.3: + return NegotiationStage.ABANDONED + else: + return current_stage + + def _generate_counter_offer( + self, + negotiation: Dict[str, Any], + profile: CustomerProfile, + intent: Dict[str, Any] + ) -> NegotiationOffer: + """Generate counter-offer based on negotiation progress.""" + + last_offer = negotiation["offers"][-1] + product = negotiation["product"] + + # Adjust offer based on negotiation progress + additional_discount = 0.0 + + if negotiation["stage"] == NegotiationStage.PRICE_OBJECTION: + additional_discount = 0.05 # Additional 5% + elif negotiation["stage"] == NegotiationStage.COMPARISON_SHOPPING: + additional_discount = 0.08 # Beat competition + elif len(negotiation["offers"]) > 2: + additional_discount = 0.03 # Persistence bonus + + # Cap total discount + new_discount = last_offer.discount_percentage / 100 + additional_discount + new_discount = min(new_discount, 0.35) # Max 35% total discount + + # Generate new offer + original_price = product.get("price", 100) + discount_amount = original_price * new_discount + offered_price = original_price - discount_amount + + # Update conditions + conditions = last_offer.conditions.copy() + if negotiation["stage"] == NegotiationStage.FINAL_DECISION: + conditions.append("Final offer - expires in 2 hours") + + return NegotiationOffer( + offer_id=f"offer_{int(datetime.now().timestamp())}", + original_price=original_price, + offered_price=offered_price, + discount_amount=discount_amount, + discount_percentage=new_discount * 100, + conditions=conditions, + valid_until=datetime.now(timezone.utc) + timedelta(hours=2), + reasoning=f"Based on your feedback, I can offer an additional {additional_discount*100:.1f}% discount.", + confidence_score=0.7 + ) + + def get_negotiation_analytics(self) -> Dict[str, Any]: + """Get negotiation performance analytics.""" + + total_negotiations = len(self.active_negotiations) + closed_negotiations = sum( + 1 for n in self.active_negotiations.values() + if n["stage"] == NegotiationStage.CLOSED + ) + + average_discount = 0.0 + if total_negotiations > 0: + total_discount = sum( + offer.discount_percentage for n in self.active_negotiations.values() + for offer in n["offers"] + ) / max(1, sum(len(n["offers"]) for n in self.active_negotiations.values())) + average_discount = total_discount + + return { + "total_negotiations": total_negotiations, + "closed_negotiations": closed_negotiations, + "conversion_rate": closed_negotiations / max(1, total_negotiations), + "average_discount_offered": average_discount, + "active_negotiations": total_negotiations - closed_negotiations, + "average_offers_per_negotiation": sum( + len(n["offers"]) for n in self.active_negotiations.values() + ) / max(1, total_negotiations) + } + + +# Global instance +negotiation_engine = NegotiationEngine() \ No newline at end of file diff --git a/samples/python/src/ai_curation/smart_curation_engine.py b/samples/python/src/ai_curation/smart_curation_engine.py new file mode 100644 index 00000000..599bf7b9 --- /dev/null +++ b/samples/python/src/ai_curation/smart_curation_engine.py @@ -0,0 +1,593 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Enhanced AI Product Curation Engine. + +This module provides advanced product recommendation, personalization, +and intelligent curation capabilities for the shopping agent. +""" + +import json +import logging +from datetime import datetime, timezone, timedelta +from typing import Any, Dict, List, Optional, Tuple +from dataclasses import dataclass +from enum import Enum + +import numpy as np +from pydantic import BaseModel, Field +from google import genai + +from ap2.types.payment_request import PaymentItem +from ap2.types.mandate import CartMandate, IntentMandate + +logger = logging.getLogger(__name__) + + +class CustomerPreference(BaseModel): + """Customer preference model.""" + + preference_type: str + value: Any + confidence: float = Field(ge=0.0, le=1.0) + last_updated: datetime + source: str # "explicit", "implicit", "inferred" + + +class ProductCategory(Enum): + """Product categories for better organization.""" + + ELECTRONICS = "electronics" + CLOTHING = "clothing" + HOME_GARDEN = "home_garden" + SPORTS_FITNESS = "sports_fitness" + BOOKS_MEDIA = "books_media" + HEALTH_BEAUTY = "health_beauty" + AUTOMOTIVE = "automotive" + TOYS_GAMES = "toys_games" + FOOD_BEVERAGE = "food_beverage" + OTHER = "other" + + +@dataclass +class ProductMetrics: + """Product performance metrics.""" + + popularity_score: float + conversion_rate: float + average_rating: float + review_count: int + price_competitiveness: float + availability_score: float + + @property + def overall_score(self) -> float: + """Calculate overall product score.""" + weights = { + 'popularity': 0.2, + 'conversion': 0.25, + 'rating': 0.2, + 'reviews': 0.1, + 'price': 0.15, + 'availability': 0.1 + } + + # Normalize review count (log scale) + normalized_reviews = min(np.log10(max(1, self.review_count)) / 4, 1.0) + + return ( + weights['popularity'] * self.popularity_score + + weights['conversion'] * self.conversion_rate + + weights['rating'] * (self.average_rating / 5.0) + + weights['reviews'] * normalized_reviews + + weights['price'] * self.price_competitiveness + + weights['availability'] * self.availability_score + ) + + +class PersonalizationEngine: + """Customer personalization and preference learning.""" + + def __init__(self): + self.customer_profiles: Dict[str, Dict[str, CustomerPreference]] = {} + self.interaction_history: Dict[str, List[Dict[str, Any]]] = {} + + def update_customer_profile( + self, + customer_id: str, + interaction_data: Dict[str, Any] + ): + """Update customer profile based on interaction.""" + + if customer_id not in self.customer_profiles: + self.customer_profiles[customer_id] = {} + + if customer_id not in self.interaction_history: + self.interaction_history[customer_id] = [] + + # Add interaction to history + self.interaction_history[customer_id].append({ + **interaction_data, + "timestamp": datetime.now(timezone.utc) + }) + + # Extract preferences from interaction + preferences = self._extract_preferences(interaction_data) + + for pref_type, pref_data in preferences.items(): + self._update_preference(customer_id, pref_type, pref_data) + + def _extract_preferences(self, interaction: Dict[str, Any]) -> Dict[str, Any]: + """Extract preferences from customer interaction.""" + + preferences = {} + + # Price sensitivity + if "budget" in interaction: + preferences["price_range"] = { + "value": interaction["budget"], + "confidence": 0.8, + "source": "explicit" + } + + # Brand preferences + if "selected_product" in interaction: + product = interaction["selected_product"] + if "brand" in product: + preferences["preferred_brands"] = { + "value": product["brand"], + "confidence": 0.6, + "source": "implicit" + } + + # Category interests + if "viewed_categories" in interaction: + for category in interaction["viewed_categories"]: + preferences[f"category_interest_{category}"] = { + "value": 1.0, + "confidence": 0.5, + "source": "implicit" + } + + return preferences + + def _update_preference( + self, + customer_id: str, + pref_type: str, + pref_data: Dict[str, Any] + ): + """Update specific customer preference.""" + + current_pref = self.customer_profiles[customer_id].get(pref_type) + + if current_pref: + # Merge with existing preference + if pref_data["source"] == "explicit": + # Explicit preferences override implicit ones + confidence = max(pref_data["confidence"], current_pref.confidence) + else: + # Gradually increase confidence for repeated implicit signals + confidence = min(current_pref.confidence + 0.1, 0.9) + + self.customer_profiles[customer_id][pref_type] = CustomerPreference( + preference_type=pref_type, + value=pref_data["value"], + confidence=confidence, + last_updated=datetime.now(timezone.utc), + source=pref_data["source"] + ) + else: + # New preference + self.customer_profiles[customer_id][pref_type] = CustomerPreference( + preference_type=pref_type, + value=pref_data["value"], + confidence=pref_data["confidence"], + last_updated=datetime.now(timezone.utc), + source=pref_data["source"] + ) + + def get_customer_preferences(self, customer_id: str) -> Dict[str, CustomerPreference]: + """Get customer preferences.""" + return self.customer_profiles.get(customer_id, {}) + + def predict_interest(self, customer_id: str, product: Dict[str, Any]) -> float: + """Predict customer interest in a product.""" + + preferences = self.get_customer_preferences(customer_id) + if not preferences: + return 0.5 # Neutral for new customers + + interest_score = 0.5 + total_weight = 0.0 + + # Brand preference + if "preferred_brands" in preferences and "brand" in product: + pref = preferences["preferred_brands"] + if product["brand"] == pref.value: + interest_score += 0.3 * pref.confidence + total_weight += 0.3 * pref.confidence + + # Price range preference + if "price_range" in preferences and "price" in product: + pref = preferences["price_range"] + price_range = pref.value + product_price = product["price"] + + if isinstance(price_range, dict): + min_price = price_range.get("min", 0) + max_price = price_range.get("max", float('inf')) + + if min_price <= product_price <= max_price: + interest_score += 0.4 * pref.confidence + else: + # Penalize for being outside price range + interest_score -= 0.2 * pref.confidence + + total_weight += 0.4 * pref.confidence + + # Category interest + product_category = product.get("category", "") + category_pref_key = f"category_interest_{product_category}" + + if category_pref_key in preferences: + pref = preferences[category_pref_key] + interest_score += 0.3 * pref.confidence + total_weight += 0.3 * pref.confidence + + # Normalize score + if total_weight > 0: + interest_score = interest_score / total_weight + + return max(0.0, min(1.0, interest_score)) + + +class SmartCurationEngine: + """Advanced product curation with AI-powered recommendations.""" + + def __init__(self): + self.personalization_engine = PersonalizationEngine() + self.llm_client = genai.Client() + self.product_metrics_cache: Dict[str, ProductMetrics] = {} + + async def curate_products( + self, + intent_mandate: IntentMandate, + customer_id: str, + max_results: int = 10, + include_bundles: bool = True + ) -> List[Dict[str, Any]]: + """Curate personalized product recommendations.""" + + # Get base product recommendations + base_products = await self._get_base_recommendations(intent_mandate) + + # Apply personalization + personalized_products = self._apply_personalization( + base_products, customer_id + ) + + # Generate smart bundles + if include_bundles: + bundles = await self._generate_smart_bundles( + personalized_products, customer_id + ) + personalized_products.extend(bundles) + + # Rank and filter + ranked_products = self._rank_products( + personalized_products, customer_id + ) + + return ranked_products[:max_results] + + async def _get_base_recommendations( + self, + intent_mandate: IntentMandate + ) -> List[Dict[str, Any]]: + """Get base product recommendations using LLM.""" + + prompt = f""" + Based on the customer's intent: "{intent_mandate.natural_language_description}" + + Generate 15 diverse, realistic product recommendations in JSON format. + Each product should include: + - name: Product name + - brand: Brand name + - price: Price as a number + - category: Product category + - description: Brief description + - features: List of key features + - rating: Average rating (1-5) + - review_count: Number of reviews + - availability: "in_stock" or "limited" or "pre_order" + - image_url: Mock image URL + + Focus on variety in price points, brands, and features. + Include some premium and budget options. + """ + + try: + response = self.llm_client.models.generate_content( + model="gemini-2.5-flash", + contents=prompt, + config={ + "response_mime_type": "application/json", + "response_schema": {"type": "array", "items": {"type": "object"}} + } + ) + + return response.parsed if response.parsed else [] + + except Exception as e: + logger.error(f"Error getting base recommendations: {e}") + return [] + + def _apply_personalization( + self, + products: List[Dict[str, Any]], + customer_id: str + ) -> List[Dict[str, Any]]: + """Apply personalization scoring to products.""" + + for product in products: + # Calculate personalization score + interest_score = self.personalization_engine.predict_interest( + customer_id, product + ) + product["personalization_score"] = interest_score + + # Calculate product metrics + metrics = self._calculate_product_metrics(product) + product["metrics"] = metrics + product["overall_score"] = metrics.overall_score + + return products + + def _calculate_product_metrics(self, product: Dict[str, Any]) -> ProductMetrics: + """Calculate product performance metrics.""" + + # Mock calculations - in production, these would come from real data + review_count = product.get("review_count", 100) + rating = product.get("rating", 4.0) + price = product.get("price", 100) + availability = product.get("availability", "in_stock") + + # Popularity based on review count and rating + popularity_score = min(np.log10(max(1, review_count)) / 4, 1.0) + + # Mock conversion rate based on rating and price + conversion_rate = max(0.1, (rating / 5.0) * (1 - min(price / 1000, 0.5))) + + # Price competitiveness (mock) + price_competitiveness = max(0.1, 1 - (price / 2000)) + + # Availability score + availability_scores = { + "in_stock": 1.0, + "limited": 0.7, + "pre_order": 0.4 + } + availability_score = availability_scores.get(availability, 0.5) + + return ProductMetrics( + popularity_score=popularity_score, + conversion_rate=conversion_rate, + average_rating=rating, + review_count=review_count, + price_competitiveness=price_competitiveness, + availability_score=availability_score + ) + + async def _generate_smart_bundles( + self, + products: List[Dict[str, Any]], + customer_id: str + ) -> List[Dict[str, Any]]: + """Generate intelligent product bundles.""" + + if len(products) < 2: + return [] + + # Group products by category for complementary bundling + category_groups = {} + for product in products: + category = product.get("category", "other") + if category not in category_groups: + category_groups[category] = [] + category_groups[category].append(product) + + bundles = [] + + # Create bundles within categories (upgrade bundles) + for category, cat_products in category_groups.items(): + if len(cat_products) >= 2: + bundle = await self._create_category_bundle(cat_products, customer_id) + if bundle: + bundles.append(bundle) + + # Create cross-category bundles (complementary bundles) + if len(category_groups) >= 2: + cross_bundle = await self._create_cross_category_bundle( + category_groups, customer_id + ) + if cross_bundle: + bundles.append(cross_bundle) + + return bundles + + async def _create_category_bundle( + self, + products: List[Dict[str, Any]], + customer_id: str + ) -> Optional[Dict[str, Any]]: + """Create bundle within same category.""" + + # Sort by overall score and pick top 2-3 + sorted_products = sorted( + products, + key=lambda x: x.get("overall_score", 0), + reverse=True + )[:3] + + if len(sorted_products) < 2: + return None + + # Calculate bundle pricing with discount + total_price = sum(p.get("price", 0) for p in sorted_products) + bundle_discount = 0.15 # 15% bundle discount + bundle_price = total_price * (1 - bundle_discount) + + return { + "type": "bundle", + "name": f"{sorted_products[0].get('category', 'Product')} Bundle", + "products": sorted_products, + "original_price": total_price, + "price": bundle_price, + "savings": total_price - bundle_price, + "discount_percentage": bundle_discount * 100, + "category": "bundle", + "description": f"Save ${total_price - bundle_price:.2f} with this curated bundle!", + "bundle_type": "category_upgrade", + "overall_score": np.mean([p.get("overall_score", 0) for p in sorted_products]) + } + + async def _create_cross_category_bundle( + self, + category_groups: Dict[str, List[Dict[str, Any]]], + customer_id: str + ) -> Optional[Dict[str, Any]]: + """Create complementary cross-category bundle.""" + + # Find complementary categories + complementary_pairs = [ + ("electronics", "accessories"), + ("sports_fitness", "health_beauty"), + ("clothing", "accessories"), + ("home_garden", "electronics") + ] + + available_categories = set(category_groups.keys()) + + for cat1, cat2 in complementary_pairs: + if cat1 in available_categories and cat2 in available_categories: + # Pick best product from each category + product1 = max( + category_groups[cat1], + key=lambda x: x.get("overall_score", 0) + ) + product2 = max( + category_groups[cat2], + key=lambda x: x.get("overall_score", 0) + ) + + total_price = product1.get("price", 0) + product2.get("price", 0) + bundle_discount = 0.12 # 12% cross-category discount + bundle_price = total_price * (1 - bundle_discount) + + return { + "type": "bundle", + "name": "Perfect Pair Bundle", + "products": [product1, product2], + "original_price": total_price, + "price": bundle_price, + "savings": total_price - bundle_price, + "discount_percentage": bundle_discount * 100, + "category": "bundle", + "description": f"Complete your experience with this perfect pair!", + "bundle_type": "complementary", + "overall_score": (product1.get("overall_score", 0) + product2.get("overall_score", 0)) / 2 + } + + return None + + def _rank_products( + self, + products: List[Dict[str, Any]], + customer_id: str + ) -> List[Dict[str, Any]]: + """Rank products using combined scoring.""" + + def calculate_final_score(product): + overall_score = product.get("overall_score", 0.5) + personalization_score = product.get("personalization_score", 0.5) + + # Weight: 60% product quality, 40% personalization + final_score = 0.6 * overall_score + 0.4 * personalization_score + + # Boost bundles slightly + if product.get("type") == "bundle": + final_score *= 1.1 + + return final_score + + # Add final scores + for product in products: + product["final_score"] = calculate_final_score(product) + + # Sort by final score + return sorted(products, key=lambda x: x["final_score"], reverse=True) + + def get_product_analytics(self, customer_id: str) -> Dict[str, Any]: + """Get analytics for product curation performance.""" + + preferences = self.personalization_engine.get_customer_preferences(customer_id) + interaction_history = self.personalization_engine.interaction_history.get(customer_id, []) + + return { + "total_interactions": len(interaction_history), + "preferences_learned": len(preferences), + "high_confidence_preferences": len([ + p for p in preferences.values() if p.confidence > 0.7 + ]), + "last_interaction": interaction_history[-1]["timestamp"] if interaction_history else None, + "top_categories": self._get_top_categories(interaction_history), + "average_session_length": self._calculate_avg_session_length(interaction_history) + } + + def _get_top_categories(self, interactions: List[Dict[str, Any]]) -> List[str]: + """Get top product categories for customer.""" + category_counts = {} + + for interaction in interactions: + if "viewed_categories" in interaction: + for category in interaction["viewed_categories"]: + category_counts[category] = category_counts.get(category, 0) + 1 + + return sorted(category_counts.keys(), key=category_counts.get, reverse=True)[:5] + + def _calculate_avg_session_length(self, interactions: List[Dict[str, Any]]) -> float: + """Calculate average session length in minutes.""" + if len(interactions) < 2: + return 0.0 + + session_lengths = [] + current_session_start = None + + for interaction in interactions: + timestamp = interaction["timestamp"] + + if current_session_start is None: + current_session_start = timestamp + else: + time_diff = (timestamp - current_session_start).total_seconds() / 60 + if time_diff > 30: # New session if gap > 30 minutes + session_lengths.append(time_diff) + current_session_start = timestamp + + return np.mean(session_lengths) if session_lengths else 0.0 + + +# Global instance +curation_engine = SmartCurationEngine() \ No newline at end of file diff --git a/samples/python/src/analytics/performance_analytics.py b/samples/python/src/analytics/performance_analytics.py new file mode 100644 index 00000000..f352f169 --- /dev/null +++ b/samples/python/src/analytics/performance_analytics.py @@ -0,0 +1,741 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Comprehensive Analytics and Performance Tracking System. + +This module provides detailed analytics for the AI shopping agent including +conversion rates, AOV tracking, customer behavior analysis, and business metrics. +""" + +import asyncio +import json +import logging +from datetime import datetime, timezone, timedelta +from typing import Any, Dict, List, Optional, Tuple +from dataclasses import dataclass, asdict +from enum import Enum +from collections import defaultdict + +import numpy as np +import pandas as pd +from pydantic import BaseModel, Field + +logger = logging.getLogger(__name__) + + +class EventType(Enum): + """Types of events to track.""" + + # Customer journey events + CUSTOMER_FIRST_CONTACT = "customer_first_contact" + PRODUCT_SEARCH = "product_search" + PRODUCT_VIEW = "product_view" + PRODUCT_RECOMMENDATION = "product_recommendation" + BUNDLE_CREATED = "bundle_created" + NEGOTIATION_STARTED = "negotiation_started" + DISCOUNT_OFFERED = "discount_offered" + CART_CREATED = "cart_created" + CART_UPDATED = "cart_updated" + CHECKOUT_STARTED = "checkout_started" + CHECKOUT_COMPLETED = "checkout_completed" + CART_ABANDONED = "cart_abandoned" + CUSTOMER_SUPPORT_REQUEST = "customer_support_request" + + # Channel events + WHATSAPP_MESSAGE = "whatsapp_message" + WEB_CHAT_MESSAGE = "web_chat_message" + CHANNEL_SWITCH = "channel_switch" + + # Business events + SALE_COMPLETED = "sale_completed" + REVENUE_GENERATED = "revenue_generated" + REFUND_PROCESSED = "refund_processed" + + +@dataclass +class AnalyticsEvent: + """Individual analytics event.""" + + event_id: str + event_type: EventType + timestamp: datetime + customer_id: str + session_id: Optional[str] + channel: Optional[str] + + # Event data + properties: Dict[str, Any] + + # Derived metrics + revenue: float = 0.0 + quantity: int = 0 + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for storage.""" + return { + **asdict(self), + "event_type": self.event_type.value, + "timestamp": self.timestamp.isoformat() + } + + +class CustomerMetrics(BaseModel): + """Customer-level metrics.""" + + customer_id: str + first_contact: datetime + last_activity: datetime + + # Engagement metrics + total_sessions: int = 0 + total_messages: int = 0 + avg_session_duration: float = 0.0 + preferred_channel: Optional[str] = None + + # Purchase behavior + total_orders: int = 0 + total_revenue: float = 0.0 + avg_order_value: float = 0.0 + conversion_rate: float = 0.0 + + # Negotiation metrics + negotiation_success_rate: float = 0.0 + avg_discount_accepted: float = 0.0 + + # Customer journey + time_to_first_purchase: Optional[float] = None # seconds + cart_abandonment_rate: float = 0.0 + + +class BusinessMetrics(BaseModel): + """Business-level metrics.""" + + period_start: datetime + period_end: datetime + + # Revenue metrics + total_revenue: float = 0.0 + total_orders: int = 0 + avg_order_value: float = 0.0 + revenue_growth_rate: float = 0.0 + + # Conversion metrics + total_visitors: int = 0 + total_conversions: int = 0 + conversion_rate: float = 0.0 + cart_abandonment_rate: float = 0.0 + + # Channel performance + channel_metrics: Dict[str, Dict[str, float]] = {} + + # AI performance + recommendation_acceptance_rate: float = 0.0 + negotiation_success_rate: float = 0.0 + avg_discount_given: float = 0.0 + + # Efficiency metrics + avg_time_to_purchase: float = 0.0 + customer_support_requests_per_order: float = 0.0 + + +class AnalyticsEngine: + """Main analytics engine.""" + + def __init__(self): + self.events: List[AnalyticsEvent] = [] + self.customer_metrics: Dict[str, CustomerMetrics] = {} + self.daily_metrics: Dict[str, BusinessMetrics] = {} + + # Real-time tracking + self.active_sessions: Dict[str, Dict[str, Any]] = {} + self.event_queue = asyncio.Queue() + + # Start background processing + asyncio.create_task(self._process_events()) + asyncio.create_task(self._generate_periodic_reports()) + + async def track_event( + self, + event_type: EventType, + customer_id: str, + properties: Dict[str, Any], + session_id: Optional[str] = None, + channel: Optional[str] = None, + revenue: float = 0.0, + quantity: int = 0 + ): + """Track an analytics event.""" + + event = AnalyticsEvent( + event_id=f"event_{int(datetime.now().timestamp())}_{len(self.events)}", + event_type=event_type, + timestamp=datetime.now(timezone.utc), + customer_id=customer_id, + session_id=session_id, + channel=channel, + properties=properties, + revenue=revenue, + quantity=quantity + ) + + await self.event_queue.put(event) + + async def _process_events(self): + """Process events from queue.""" + + while True: + try: + event = await self.event_queue.get() + await self._handle_event(event) + self.event_queue.task_done() + + except Exception as e: + logger.error(f"Error processing event: {e}") + await asyncio.sleep(1) + + async def _handle_event(self, event: AnalyticsEvent): + """Handle individual event and update metrics.""" + + # Store event + self.events.append(event) + + # Update customer metrics + await self._update_customer_metrics(event) + + # Update business metrics + await self._update_business_metrics(event) + + # Update session tracking + await self._update_session_tracking(event) + + logger.debug(f"Processed event: {event.event_type.value} for customer {event.customer_id}") + + async def _update_customer_metrics(self, event: AnalyticsEvent): + """Update customer-level metrics.""" + + customer_id = event.customer_id + + if customer_id not in self.customer_metrics: + self.customer_metrics[customer_id] = CustomerMetrics( + customer_id=customer_id, + first_contact=event.timestamp, + last_activity=event.timestamp + ) + + metrics = self.customer_metrics[customer_id] + metrics.last_activity = event.timestamp + + # Update based on event type + if event.event_type == EventType.CUSTOMER_FIRST_CONTACT: + metrics.total_sessions += 1 + + elif event.event_type in [EventType.WHATSAPP_MESSAGE, EventType.WEB_CHAT_MESSAGE]: + metrics.total_messages += 1 + if event.channel: + metrics.preferred_channel = self._determine_preferred_channel(customer_id) + + elif event.event_type == EventType.SALE_COMPLETED: + metrics.total_orders += 1 + metrics.total_revenue += event.revenue + metrics.avg_order_value = metrics.total_revenue / metrics.total_orders + + if metrics.total_orders == 1: + metrics.time_to_first_purchase = ( + event.timestamp - metrics.first_contact + ).total_seconds() + + elif event.event_type == EventType.CART_ABANDONED: + # Calculate abandonment rate + total_carts = len([ + e for e in self.events + if e.customer_id == customer_id and e.event_type == EventType.CART_CREATED + ]) + abandoned_carts = len([ + e for e in self.events + if e.customer_id == customer_id and e.event_type == EventType.CART_ABANDONED + ]) + metrics.cart_abandonment_rate = abandoned_carts / max(1, total_carts) + + elif event.event_type == EventType.NEGOTIATION_STARTED: + # Update negotiation metrics + await self._update_negotiation_metrics(customer_id) + + def _determine_preferred_channel(self, customer_id: str) -> str: + """Determine customer's preferred channel.""" + + channel_counts = defaultdict(int) + + for event in self.events: + if event.customer_id == customer_id and event.channel: + channel_counts[event.channel] += 1 + + if channel_counts: + return max(channel_counts.keys(), key=channel_counts.get) + + return "unknown" + + async def _update_negotiation_metrics(self, customer_id: str): + """Update negotiation success metrics for customer.""" + + customer_events = [e for e in self.events if e.customer_id == customer_id] + + negotiation_starts = [ + e for e in customer_events if e.event_type == EventType.NEGOTIATION_STARTED + ] + + successful_negotiations = [ + e for e in customer_events + if e.event_type == EventType.SALE_COMPLETED + and e.properties.get("negotiated", False) + ] + + if negotiation_starts: + metrics = self.customer_metrics[customer_id] + metrics.negotiation_success_rate = len(successful_negotiations) / len(negotiation_starts) + + # Calculate average discount accepted + discounts = [ + e.properties.get("discount_percentage", 0) + for e in successful_negotiations + if e.properties.get("discount_percentage") + ] + + if discounts: + metrics.avg_discount_accepted = np.mean(discounts) + + async def _update_business_metrics(self, event: AnalyticsEvent): + """Update business-level metrics.""" + + date_key = event.timestamp.date().isoformat() + + if date_key not in self.daily_metrics: + self.daily_metrics[date_key] = BusinessMetrics( + period_start=datetime.combine(event.timestamp.date(), datetime.min.time()), + period_end=datetime.combine(event.timestamp.date(), datetime.max.time()) + ) + + metrics = self.daily_metrics[date_key] + + if event.event_type == EventType.CUSTOMER_FIRST_CONTACT: + metrics.total_visitors += 1 + + elif event.event_type == EventType.SALE_COMPLETED: + metrics.total_orders += 1 + metrics.total_revenue += event.revenue + metrics.total_conversions += 1 + + if metrics.total_orders > 0: + metrics.avg_order_value = metrics.total_revenue / metrics.total_orders + + if metrics.total_visitors > 0: + metrics.conversion_rate = metrics.total_conversions / metrics.total_visitors + + elif event.event_type == EventType.CART_ABANDONED: + # Update abandonment rate + today_events = [ + e for e in self.events + if e.timestamp.date() == event.timestamp.date() + ] + + total_carts = len([ + e for e in today_events if e.event_type == EventType.CART_CREATED + ]) + abandoned_carts = len([ + e for e in today_events if e.event_type == EventType.CART_ABANDONED + ]) + + metrics.cart_abandonment_rate = abandoned_carts / max(1, total_carts) + + # Update channel metrics + if event.channel: + if event.channel not in metrics.channel_metrics: + metrics.channel_metrics[event.channel] = { + "messages": 0, + "conversions": 0, + "revenue": 0.0 + } + + if event.event_type in [EventType.WHATSAPP_MESSAGE, EventType.WEB_CHAT_MESSAGE]: + metrics.channel_metrics[event.channel]["messages"] += 1 + + elif event.event_type == EventType.SALE_COMPLETED: + metrics.channel_metrics[event.channel]["conversions"] += 1 + metrics.channel_metrics[event.channel]["revenue"] += event.revenue + + async def _update_session_tracking(self, event: AnalyticsEvent): + """Update session tracking.""" + + if not event.session_id: + return + + if event.session_id not in self.active_sessions: + self.active_sessions[event.session_id] = { + "customer_id": event.customer_id, + "start_time": event.timestamp, + "last_activity": event.timestamp, + "events": [], + "channel": event.channel + } + + session = self.active_sessions[event.session_id] + session["last_activity"] = event.timestamp + session["events"].append(event.event_type.value) + + # Calculate session duration and update customer metrics + if event.customer_id in self.customer_metrics: + duration = (event.timestamp - session["start_time"]).total_seconds() + customer_metrics = self.customer_metrics[event.customer_id] + + # Update average session duration + total_duration = customer_metrics.avg_session_duration * (customer_metrics.total_sessions - 1) + duration + customer_metrics.avg_session_duration = total_duration / customer_metrics.total_sessions + + def get_customer_analytics(self, customer_id: str) -> Optional[CustomerMetrics]: + """Get analytics for specific customer.""" + return self.customer_metrics.get(customer_id) + + def get_business_analytics( + self, + start_date: Optional[datetime] = None, + end_date: Optional[datetime] = None + ) -> Dict[str, Any]: + """Get business analytics for date range.""" + + if not start_date: + start_date = datetime.now() - timedelta(days=30) + if not end_date: + end_date = datetime.now() + + # Filter events in date range + filtered_events = [ + e for e in self.events + if start_date <= e.timestamp <= end_date + ] + + # Calculate aggregated metrics + total_revenue = sum(e.revenue for e in filtered_events) + total_orders = len([e for e in filtered_events if e.event_type == EventType.SALE_COMPLETED]) + total_visitors = len(set(e.customer_id for e in filtered_events)) + + # Customer insights + customer_segments = self._analyze_customer_segments(filtered_events) + + # Channel performance + channel_performance = self._analyze_channel_performance(filtered_events) + + # Product performance + product_insights = self._analyze_product_performance(filtered_events) + + # AI performance + ai_performance = self._analyze_ai_performance(filtered_events) + + return { + "period": { + "start": start_date.isoformat(), + "end": end_date.isoformat() + }, + "summary": { + "total_revenue": total_revenue, + "total_orders": total_orders, + "total_visitors": total_visitors, + "avg_order_value": total_revenue / max(1, total_orders), + "conversion_rate": total_orders / max(1, total_visitors) + }, + "customer_segments": customer_segments, + "channel_performance": channel_performance, + "product_insights": product_insights, + "ai_performance": ai_performance, + "trends": self._calculate_trends(filtered_events) + } + + def _analyze_customer_segments(self, events: List[AnalyticsEvent]) -> Dict[str, Any]: + """Analyze customer segments.""" + + customers = list(set(e.customer_id for e in events)) + + segments = { + "new_customers": 0, + "returning_customers": 0, + "high_value_customers": 0, + "at_risk_customers": 0 + } + + for customer_id in customers: + if customer_id in self.customer_metrics: + metrics = self.customer_metrics[customer_id] + + if metrics.total_orders == 0: + segments["new_customers"] += 1 + elif metrics.total_orders > 1: + segments["returning_customers"] += 1 + + if metrics.total_revenue > 500: + segments["high_value_customers"] += 1 + + days_since_last_activity = ( + datetime.now(timezone.utc) - metrics.last_activity + ).days + + if days_since_last_activity > 30 and metrics.total_orders > 0: + segments["at_risk_customers"] += 1 + + return segments + + def _analyze_channel_performance(self, events: List[AnalyticsEvent]) -> Dict[str, Any]: + """Analyze channel performance.""" + + channel_data = defaultdict(lambda: { + "messages": 0, + "customers": set(), + "conversions": 0, + "revenue": 0.0 + }) + + for event in events: + if event.channel: + data = channel_data[event.channel] + + if event.event_type in [EventType.WHATSAPP_MESSAGE, EventType.WEB_CHAT_MESSAGE]: + data["messages"] += 1 + data["customers"].add(event.customer_id) + + elif event.event_type == EventType.SALE_COMPLETED: + data["conversions"] += 1 + data["revenue"] += event.revenue + + # Convert to final format + result = {} + for channel, data in channel_data.items(): + result[channel] = { + "total_messages": data["messages"], + "unique_customers": len(data["customers"]), + "conversions": data["conversions"], + "revenue": data["revenue"], + "conversion_rate": data["conversions"] / max(1, len(data["customers"])), + "revenue_per_customer": data["revenue"] / max(1, len(data["customers"])) + } + + return result + + def _analyze_product_performance(self, events: List[AnalyticsEvent]) -> Dict[str, Any]: + """Analyze product performance.""" + + # Mock product analysis - in production, extract from event properties + return { + "top_selling_products": [ + {"name": "Premium Wireless Headphones", "sales": 45, "revenue": 8955}, + {"name": "Smart Fitness Watch", "sales": 32, "revenue": 9600}, + {"name": "Bluetooth Speaker Bundle", "sales": 28, "revenue": 2519} + ], + "top_searched_products": [ + {"name": "laptop", "searches": 156}, + {"name": "headphones", "searches": 134}, + {"name": "phone", "searches": 98} + ], + "bundle_performance": { + "bundles_created": 67, + "bundles_purchased": 23, + "bundle_conversion_rate": 0.34, + "average_bundle_value": 287.50 + } + } + + def _analyze_ai_performance(self, events: List[AnalyticsEvent]) -> Dict[str, Any]: + """Analyze AI system performance.""" + + recommendation_events = [ + e for e in events if e.event_type == EventType.PRODUCT_RECOMMENDATION + ] + + negotiation_events = [ + e for e in events if e.event_type == EventType.NEGOTIATION_STARTED + ] + + discount_events = [ + e for e in events if e.event_type == EventType.DISCOUNT_OFFERED + ] + + return { + "recommendations": { + "total_recommendations": len(recommendation_events), + "acceptance_rate": 0.67, # Mock - calculate from actual data + "avg_recommendation_value": 189.50 + }, + "negotiations": { + "total_negotiations": len(negotiation_events), + "success_rate": 0.58, + "avg_discount_given": 12.5, + "negotiation_conversion_rate": 0.45 + }, + "discounts": { + "total_discounts_offered": len(discount_events), + "discount_acceptance_rate": 0.73, + "avg_discount_percentage": 11.2, + "revenue_impact": -1247.30 # Negative = cost, positive = gain + } + } + + def _calculate_trends(self, events: List[AnalyticsEvent]) -> Dict[str, Any]: + """Calculate trends over time.""" + + # Group events by day + daily_data = defaultdict(lambda: { + "revenue": 0.0, + "orders": 0, + "visitors": set() + }) + + for event in events: + day = event.timestamp.date().isoformat() + + if event.event_type == EventType.SALE_COMPLETED: + daily_data[day]["revenue"] += event.revenue + daily_data[day]["orders"] += 1 + + daily_data[day]["visitors"].add(event.customer_id) + + # Calculate trends + days = sorted(daily_data.keys()) + if len(days) < 2: + return {"trend": "insufficient_data"} + + # Revenue trend + recent_revenue = sum(daily_data[day]["revenue"] for day in days[-7:]) + previous_revenue = sum(daily_data[day]["revenue"] for day in days[-14:-7]) if len(days) >= 14 else recent_revenue + + revenue_growth = ((recent_revenue - previous_revenue) / max(1, previous_revenue)) * 100 if previous_revenue > 0 else 0 + + # Order trend + recent_orders = sum(daily_data[day]["orders"] for day in days[-7:]) + previous_orders = sum(daily_data[day]["orders"] for day in days[-14:-7]) if len(days) >= 14 else recent_orders + + order_growth = ((recent_orders - previous_orders) / max(1, previous_orders)) * 100 if previous_orders > 0 else 0 + + return { + "revenue_growth_7d": revenue_growth, + "order_growth_7d": order_growth, + "trend_direction": "up" if revenue_growth > 5 else "down" if revenue_growth < -5 else "stable" + } + + async def _generate_periodic_reports(self): + """Generate periodic analytics reports.""" + + while True: + try: + # Generate daily report at midnight + now = datetime.now() + if now.hour == 0 and now.minute == 0: + await self._generate_daily_report() + + # Generate weekly report on Sundays + if now.weekday() == 6 and now.hour == 0: + await self._generate_weekly_report() + + await asyncio.sleep(3600) # Check every hour + + except Exception as e: + logger.error(f"Error generating periodic reports: {e}") + await asyncio.sleep(3600) + + async def _generate_daily_report(self): + """Generate daily analytics report.""" + + yesterday = datetime.now() - timedelta(days=1) + analytics = self.get_business_analytics( + start_date=yesterday.replace(hour=0, minute=0, second=0), + end_date=yesterday.replace(hour=23, minute=59, second=59) + ) + + logger.info(f"Daily report generated: {analytics['summary']}") + + # In production, send to stakeholders via email/dashboard + + async def _generate_weekly_report(self): + """Generate weekly analytics report.""" + + week_start = datetime.now() - timedelta(days=7) + analytics = self.get_business_analytics(start_date=week_start) + + logger.info(f"Weekly report generated: {analytics['summary']}") + + # In production, send comprehensive report to stakeholders + + def export_data(self, format: str = "json") -> str: + """Export analytics data.""" + + if format == "json": + export_data = { + "events": [event.to_dict() for event in self.events], + "customer_metrics": { + cid: metrics.dict() for cid, metrics in self.customer_metrics.items() + }, + "business_metrics": { + date: metrics.dict() for date, metrics in self.daily_metrics.items() + } + } + return json.dumps(export_data, indent=2, default=str) + + elif format == "csv": + # Convert events to DataFrame and export as CSV + events_data = [event.to_dict() for event in self.events] + df = pd.DataFrame(events_data) + return df.to_csv(index=False) + + else: + raise ValueError(f"Unsupported export format: {format}") + + +# Global analytics instance +analytics_engine = AnalyticsEngine() + + +# Helper functions for easy tracking +async def track_customer_interaction(customer_id: str, message: str, channel: str): + """Track customer interaction.""" + await analytics_engine.track_event( + EventType.WHATSAPP_MESSAGE if channel == "whatsapp" else EventType.WEB_CHAT_MESSAGE, + customer_id=customer_id, + properties={"message": message}, + channel=channel + ) + + +async def track_product_search(customer_id: str, query: str, results_count: int): + """Track product search.""" + await analytics_engine.track_event( + EventType.PRODUCT_SEARCH, + customer_id=customer_id, + properties={"query": query, "results_count": results_count} + ) + + +async def track_sale_completion(customer_id: str, order_value: float, items: List[Dict[str, Any]], channel: str): + """Track completed sale.""" + await analytics_engine.track_event( + EventType.SALE_COMPLETED, + customer_id=customer_id, + properties={"items": items, "order_id": f"order_{int(datetime.now().timestamp())}"}, + channel=channel, + revenue=order_value, + quantity=len(items) + ) + + +async def track_cart_abandonment(customer_id: str, cart_value: float, stage: str): + """Track cart abandonment.""" + await analytics_engine.track_event( + EventType.CART_ABANDONED, + customer_id=customer_id, + properties={"cart_value": cart_value, "abandonment_stage": stage} + ) \ No newline at end of file diff --git a/samples/python/src/channels/unified_chat_manager.py b/samples/python/src/channels/unified_chat_manager.py new file mode 100644 index 00000000..001f40b7 --- /dev/null +++ b/samples/python/src/channels/unified_chat_manager.py @@ -0,0 +1,812 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unified Multi-Channel Chat Interface. + +This module provides a unified interface for handling conversations across +multiple channels (WhatsApp, web chat, SMS, etc.) while maintaining context +and providing consistent shopping experiences. +""" + +import asyncio +import json +import logging +from abc import ABC, abstractmethod +from datetime import datetime, timezone, timedelta +from typing import Any, Dict, List, Optional, Union +from enum import Enum + +from pydantic import BaseModel, Field +from fastapi import FastAPI, WebSocket, WebSocketDisconnect +from fastapi.staticfiles import StaticFiles +from fastapi.responses import HTMLResponse + +from channels.whatsapp_integration import WhatsAppShoppingAgent +from ai_curation.smart_curation_engine import curation_engine +from ai_curation.negotiation_engine import negotiation_engine +from roles.shopping_agent.agent import root_agent + +logger = logging.getLogger(__name__) + + +class ChannelType(Enum): + """Supported communication channels.""" + + WHATSAPP = "whatsapp" + WEB_CHAT = "web_chat" + SMS = "sms" + TELEGRAM = "telegram" + INSTAGRAM = "instagram" + FACEBOOK = "facebook" + + +class MessageType(Enum): + """Types of messages.""" + + TEXT = "text" + IMAGE = "image" + QUICK_REPLY = "quick_reply" + BUTTON = "button" + CAROUSEL = "carousel" + LOCATION = "location" + CONTACT = "contact" + + +class Message(BaseModel): + """Universal message model.""" + + id: str + channel: ChannelType + sender_id: str + message_type: MessageType + content: Union[str, Dict[str, Any]] + timestamp: datetime + metadata: Dict[str, Any] = {} + + # Channel-specific data + channel_message_id: Optional[str] = None + reply_to: Optional[str] = None + attachments: List[Dict[str, Any]] = [] + + +class ConversationContext(BaseModel): + """Maintains conversation context across channels.""" + + customer_id: str + active_channels: List[ChannelType] = [] + current_channel: Optional[ChannelType] = None + conversation_history: List[Message] = [] + shopping_context: Dict[str, Any] = {} + preferences: Dict[str, Any] = {} + session_start: datetime + last_activity: datetime + + # Shopping-specific context + current_intent: Optional[str] = None + cart_items: List[Dict[str, Any]] = [] + browsing_history: List[Dict[str, Any]] = [] + negotiation_state: Optional[Dict[str, Any]] = None + + +class ChannelAdapter(ABC): + """Abstract base class for channel adapters.""" + + @abstractmethod + async def send_message(self, recipient: str, message: Message) -> bool: + """Send message through this channel.""" + pass + + @abstractmethod + async def format_message(self, message: Dict[str, Any]) -> Message: + """Format channel-specific message to universal format.""" + pass + + @abstractmethod + async def format_response(self, message: Message) -> Dict[str, Any]: + """Format universal message to channel-specific format.""" + pass + + @abstractmethod + def supports_rich_content(self) -> bool: + """Whether channel supports rich content (buttons, carousels, etc.).""" + pass + + +class WhatsAppAdapter(ChannelAdapter): + """WhatsApp Business API adapter.""" + + def __init__(self): + self.whatsapp_agent = WhatsAppShoppingAgent() + + async def send_message(self, recipient: str, message: Message) -> bool: + """Send message via WhatsApp.""" + + if message.message_type == MessageType.TEXT: + return await self.whatsapp_agent.send_whatsapp_message( + recipient, str(message.content) + ) + elif message.message_type == MessageType.BUTTON: + content = message.content + if isinstance(content, dict): + return await self.whatsapp_agent.send_interactive_message( + recipient, + content.get("header", ""), + content.get("body", ""), + content.get("buttons", []) + ) + + return False + + async def format_message(self, whatsapp_data: Dict[str, Any]) -> Message: + """Convert WhatsApp message to universal format.""" + + return Message( + id=whatsapp_data.get("id", ""), + channel=ChannelType.WHATSAPP, + sender_id=whatsapp_data.get("from", ""), + message_type=MessageType.TEXT, + content=whatsapp_data.get("text", {}).get("body", ""), + timestamp=datetime.fromtimestamp( + int(whatsapp_data.get("timestamp", 0)), timezone.utc + ), + channel_message_id=whatsapp_data.get("id") + ) + + async def format_response(self, message: Message) -> Dict[str, Any]: + """Format universal message for WhatsApp.""" + + if message.message_type == MessageType.TEXT: + return { + "messaging_product": "whatsapp", + "to": message.sender_id, + "type": "text", + "text": {"body": str(message.content)} + } + elif message.message_type == MessageType.BUTTON: + content = message.content + if isinstance(content, dict): + return { + "messaging_product": "whatsapp", + "to": message.sender_id, + "type": "interactive", + "interactive": { + "type": "button", + "header": {"type": "text", "text": content.get("header", "")}, + "body": {"text": content.get("body", "")}, + "action": {"buttons": content.get("buttons", [])} + } + } + + return {} + + def supports_rich_content(self) -> bool: + """WhatsApp supports rich content.""" + return True + + +class WebChatAdapter(ChannelAdapter): + """Web chat widget adapter.""" + + def __init__(self): + self.active_connections: Dict[str, WebSocket] = {} + + async def send_message(self, recipient: str, message: Message) -> bool: + """Send message via WebSocket.""" + + if recipient in self.active_connections: + try: + response_data = await self.format_response(message) + await self.active_connections[recipient].send_text( + json.dumps(response_data) + ) + return True + except Exception as e: + logger.error(f"Error sending web chat message: {e}") + return False + + return False + + async def format_message(self, web_data: Dict[str, Any]) -> Message: + """Convert web chat message to universal format.""" + + return Message( + id=web_data.get("id", ""), + channel=ChannelType.WEB_CHAT, + sender_id=web_data.get("sender_id", ""), + message_type=MessageType(web_data.get("type", "text")), + content=web_data.get("content", ""), + timestamp=datetime.now(timezone.utc), + metadata=web_data.get("metadata", {}) + ) + + async def format_response(self, message: Message) -> Dict[str, Any]: + """Format universal message for web chat.""" + + return { + "id": message.id, + "type": message.message_type.value, + "content": message.content, + "timestamp": message.timestamp.isoformat(), + "sender": "assistant" + } + + def supports_rich_content(self) -> bool: + """Web chat supports rich content.""" + return True + + async def add_connection(self, client_id: str, websocket: WebSocket): + """Add WebSocket connection.""" + self.active_connections[client_id] = websocket + + async def remove_connection(self, client_id: str): + """Remove WebSocket connection.""" + if client_id in self.active_connections: + del self.active_connections[client_id] + + +class UnifiedChatManager: + """Manages conversations across all channels.""" + + def __init__(self): + self.adapters: Dict[ChannelType, ChannelAdapter] = { + ChannelType.WHATSAPP: WhatsAppAdapter(), + ChannelType.WEB_CHAT: WebChatAdapter() + } + + self.active_conversations: Dict[str, ConversationContext] = {} + self.message_queue = asyncio.Queue() + + # AI components + self.shopping_agent = root_agent + self.curation_engine = curation_engine + self.negotiation_engine = negotiation_engine + + # Start message processor + asyncio.create_task(self._process_messages()) + + async def handle_incoming_message( + self, + channel: ChannelType, + raw_message: Dict[str, Any] + ) -> bool: + """Handle incoming message from any channel.""" + + try: + # Convert to universal format + adapter = self.adapters[channel] + message = await adapter.format_message(raw_message) + + # Get or create conversation context + context = self.get_or_create_context(message.sender_id, channel) + context.conversation_history.append(message) + context.last_activity = datetime.now(timezone.utc) + context.current_channel = channel + + # Add to processing queue + await self.message_queue.put((context, message)) + + return True + + except Exception as e: + logger.error(f"Error handling incoming message: {e}") + return False + + def get_or_create_context( + self, + customer_id: str, + channel: ChannelType + ) -> ConversationContext: + """Get or create conversation context.""" + + if customer_id not in self.active_conversations: + self.active_conversations[customer_id] = ConversationContext( + customer_id=customer_id, + session_start=datetime.now(timezone.utc), + last_activity=datetime.now(timezone.utc) + ) + + context = self.active_conversations[customer_id] + + if channel not in context.active_channels: + context.active_channels.append(channel) + + return context + + async def _process_messages(self): + """Process messages from the queue.""" + + while True: + try: + context, message = await self.message_queue.get() + await self._handle_message(context, message) + self.message_queue.task_done() + + except Exception as e: + logger.error(f"Error processing message: {e}") + await asyncio.sleep(1) + + async def _handle_message(self, context: ConversationContext, message: Message): + """Process individual message with shopping intelligence.""" + + try: + # Update customer profile for personalization + interaction_data = { + "message": str(message.content), + "channel": message.channel.value, + "timestamp": message.timestamp, + "metadata": message.metadata + } + + self.curation_engine.personalization_engine.update_customer_profile( + context.customer_id, interaction_data + ) + + # Determine response strategy + response_strategy = await self._analyze_message_intent(message, context) + + # Generate response + response = await self._generate_response(message, context, response_strategy) + + # Send response + if response: + await self._send_response(context, response) + + except Exception as e: + logger.error(f"Error handling message: {e}") + # Send error response + error_response = self._create_error_response(message.sender_id, message.channel) + await self._send_response(context, error_response) + + async def _analyze_message_intent( + self, + message: Message, + context: ConversationContext + ) -> Dict[str, Any]: + """Analyze message intent and determine response strategy.""" + + message_text = str(message.content).lower() + + # Check for common intents + intents = { + "greeting": any(word in message_text for word in ["hi", "hello", "hey", "start"]), + "product_search": any(word in message_text for word in ["buy", "shop", "looking for", "need", "want", "find"]), + "price_inquiry": any(word in message_text for word in ["price", "cost", "how much", "expensive"]), + "negotiation": any(word in message_text for word in ["discount", "deal", "cheaper", "negotiate", "better price"]), + "comparison": any(word in message_text for word in ["compare", "vs", "better", "alternative"]), + "cart_action": any(word in message_text for word in ["cart", "checkout", "buy now", "purchase"]), + "support": any(word in message_text for word in ["help", "support", "problem", "issue"]) + } + + # Determine primary intent + primary_intent = max(intents.keys(), key=lambda k: intents[k]) + + return { + "primary_intent": primary_intent if intents[primary_intent] else "general", + "intents": intents, + "message_sentiment": self._analyze_sentiment(message_text), + "urgency_level": self._analyze_urgency(message_text), + "channel_capabilities": self.adapters[message.channel].supports_rich_content() + } + + def _analyze_sentiment(self, text: str) -> str: + """Simple sentiment analysis.""" + + positive_words = ["good", "great", "excellent", "love", "like", "awesome", "perfect"] + negative_words = ["bad", "terrible", "hate", "dislike", "awful", "horrible", "worst"] + + positive_count = sum(1 for word in positive_words if word in text) + negative_count = sum(1 for word in negative_words if word in text) + + if positive_count > negative_count: + return "positive" + elif negative_count > positive_count: + return "negative" + else: + return "neutral" + + def _analyze_urgency(self, text: str) -> float: + """Analyze message urgency (0-1 scale).""" + + urgent_words = ["urgent", "asap", "immediately", "now", "today", "emergency"] + urgent_count = sum(1 for word in urgent_words if word in text) + + return min(1.0, urgent_count * 0.3 + 0.1) + + async def _generate_response( + self, + message: Message, + context: ConversationContext, + strategy: Dict[str, Any] + ) -> Optional[Message]: + """Generate appropriate response based on strategy.""" + + intent = strategy["primary_intent"] + + if intent == "greeting": + return self._create_greeting_response(message, context) + + elif intent == "product_search": + return await self._handle_product_search(message, context) + + elif intent == "price_inquiry" or intent == "negotiation": + return await self._handle_price_negotiation(message, context) + + elif intent == "cart_action": + return await self._handle_cart_action(message, context) + + elif intent == "support": + return self._create_support_response(message, context) + + else: + return await self._handle_general_query(message, context) + + def _create_greeting_response( + self, + message: Message, + context: ConversationContext + ) -> Message: + """Create personalized greeting response.""" + + # Check if returning customer + is_returning = len(context.conversation_history) > 1 + + if is_returning: + content = ("Welcome back! ๐Ÿ‘‹ I remember you were interested in some great products. " + "How can I help you continue your shopping journey today?") + else: + content = ("๐Ÿ‘‹ Hi! I'm your AI shopping assistant. I can help you find products, " + "compare prices, negotiate deals, and complete purchases right here!\n\n" + "What are you looking to buy today? For example:\n" + "โ€ข 'I need a new laptop'\n" + "โ€ข 'Show me winter jackets under $200'\n" + "โ€ข 'Find me the best phone deals'") + + return Message( + id=f"response_{int(datetime.now().timestamp())}", + channel=message.channel, + sender_id="assistant", + message_type=MessageType.TEXT, + content=content, + timestamp=datetime.now(timezone.utc) + ) + + async def _handle_product_search( + self, + message: Message, + context: ConversationContext + ) -> Message: + """Handle product search requests.""" + + # Extract search intent + search_query = str(message.content) + + # Update context + context.current_intent = search_query + context.browsing_history.append({ + "query": search_query, + "timestamp": datetime.now(timezone.utc) + }) + + # Mock product recommendations (integrate with actual curation engine) + content = (f"๐Ÿ” Great! I'll help you find '{search_query}'. " + f"Let me search our catalog for the best options...\n\n" + f"I found several excellent matches! Here are my top recommendations:\n\n" + f"1๏ธโƒฃ **Premium Option** - $299\n" + f"โญ 4.8/5 stars | Free shipping\n" + f"๐ŸŽฏ Perfect match for your needs\n\n" + f"2๏ธโƒฃ **Best Value** - $149\n" + f"โญ 4.6/5 stars | 2-day delivery\n" + f"๐Ÿ’ฐ Great features at lower price\n\n" + f"3๏ธโƒฃ **Bundle Deal** - $199 (save $50!)\n" + f"โญ 4.7/5 stars | Complete package\n" + f"๐Ÿ“ฆ Everything you need included\n\n" + f"Which option interests you most? Or would you like me to:\n" + f"๐Ÿ’ฌ Negotiate a better price\n" + f"๐Ÿ”„ See more alternatives\n" + f"๐Ÿ“Š Compare features side-by-side") + + return Message( + id=f"response_{int(datetime.now().timestamp())}", + channel=message.channel, + sender_id="assistant", + message_type=MessageType.TEXT, + content=content, + timestamp=datetime.now(timezone.utc) + ) + + async def _handle_price_negotiation( + self, + message: Message, + context: ConversationContext + ) -> Message: + """Handle price negotiation requests.""" + + content = ("๐Ÿ’ฐ I'd be happy to help you get the best price! " + "Let me see what I can do...\n\n" + "โœจ **Special Offer for You:**\n" + "โ€ข 15% off your first purchase\n" + "โ€ข Free shipping (save $15)\n" + "โ€ข Extended 60-day returns\n\n" + "This brings your total down from $299 to $254!\n\n" + "This exclusive offer is valid for the next 2 hours. " + "Shall I add this to your cart with the special pricing?") + + return Message( + id=f"response_{int(datetime.now().timestamp())}", + channel=message.channel, + sender_id="assistant", + message_type=MessageType.TEXT, + content=content, + timestamp=datetime.now(timezone.utc) + ) + + async def _handle_cart_action( + self, + message: Message, + context: ConversationContext + ) -> Message: + """Handle cart and checkout actions.""" + + content = ("๐Ÿ›’ **Your Cart Summary:**\n\n" + "๐Ÿ“ฑ Premium Smartphone - $254.00\n" + "๐Ÿšš Free shipping - $0.00\n" + "๐Ÿ’ฐ Discount applied - (-$45.00)\n" + "๐Ÿ“‹ **Total: $254.00**\n\n" + "Ready to checkout? I'll guide you through our secure AP2 payment process. " + "It's fast, safe, and you can pay with your preferred method.\n\n" + "Just say 'checkout' to proceed, or 'add more' to continue shopping!") + + return Message( + id=f"response_{int(datetime.now().timestamp())}", + channel=message.channel, + sender_id="assistant", + message_type=MessageType.TEXT, + content=content, + timestamp=datetime.now(timezone.utc) + ) + + def _create_support_response( + self, + message: Message, + context: ConversationContext + ) -> Message: + """Create support response.""" + + content = ("๐Ÿ†˜ I'm here to help! I can assist you with:\n\n" + "๐Ÿ›๏ธ **Shopping:**\n" + "โ€ข Finding products\n" + "โ€ข Comparing options\n" + "โ€ข Getting best prices\n\n" + "๐Ÿ’ณ **Orders & Payments:**\n" + "โ€ข Secure checkout\n" + "โ€ข Order status\n" + "โ€ข Returns & refunds\n\n" + "What specific help do you need today?") + + return Message( + id=f"response_{int(datetime.now().timestamp())}", + channel=message.channel, + sender_id="assistant", + message_type=MessageType.TEXT, + content=content, + timestamp=datetime.now(timezone.utc) + ) + + async def _handle_general_query( + self, + message: Message, + context: ConversationContext + ) -> Message: + """Handle general queries.""" + + content = ("I understand you're looking for assistance! I'm your AI shopping assistant " + "and I specialize in helping you find great products and complete purchases.\n\n" + "Here's what I can help you with:\n" + "๐Ÿ” Find specific products\n" + "๐Ÿ’ฐ Get the best prices and deals\n" + "๐Ÿ“ฆ Create perfect bundles\n" + "๐Ÿ›’ Complete secure checkout\n\n" + "What would you like to shop for today?") + + return Message( + id=f"response_{int(datetime.now().timestamp())}", + channel=message.channel, + sender_id="assistant", + message_type=MessageType.TEXT, + content=content, + timestamp=datetime.now(timezone.utc) + ) + + def _create_error_response(self, sender_id: str, channel: ChannelType) -> Message: + """Create error response.""" + + return Message( + id=f"error_{int(datetime.now().timestamp())}", + channel=channel, + sender_id="assistant", + message_type=MessageType.TEXT, + content="Sorry, I encountered an issue. Please try again or contact support if the problem persists.", + timestamp=datetime.now(timezone.utc) + ) + + async def _send_response(self, context: ConversationContext, response: Message): + """Send response through appropriate channel.""" + + channel = context.current_channel + if channel and channel in self.adapters: + adapter = self.adapters[channel] + success = await adapter.send_message(context.customer_id, response) + + if success: + context.conversation_history.append(response) + else: + logger.error(f"Failed to send response via {channel}") + + def get_conversation_analytics(self, customer_id: str) -> Dict[str, Any]: + """Get conversation analytics for a customer.""" + + if customer_id not in self.active_conversations: + return {} + + context = self.active_conversations[customer_id] + + return { + "total_messages": len(context.conversation_history), + "conversation_duration": ( + context.last_activity - context.session_start + ).total_seconds() / 60, # minutes + "active_channels": [ch.value for ch in context.active_channels], + "current_intent": context.current_intent, + "cart_value": sum(item.get("price", 0) for item in context.cart_items), + "browsing_history_count": len(context.browsing_history) + } + + +# FastAPI app for web chat +app = FastAPI(title="Unified Chat Manager", version="1.0.0") + +# Initialize chat manager +chat_manager = UnifiedChatManager() + +# Serve static files for web chat widget +app.mount("/static", StaticFiles(directory="static"), name="static") + + +@app.get("/") +async def get_chat_widget(): + """Serve the web chat widget.""" + + html_content = """ + + + + AI Shopping Assistant + + + +
+
+
+ + +
+
+ + + + + """ + + return HTMLResponse(content=html_content) + + +@app.websocket("/ws/{client_id}") +async def websocket_endpoint(websocket: WebSocket, client_id: str): + """WebSocket endpoint for web chat.""" + + await websocket.accept() + + # Add connection to web chat adapter + web_adapter = chat_manager.adapters[ChannelType.WEB_CHAT] + await web_adapter.add_connection(client_id, websocket) + + try: + while True: + data = await websocket.receive_text() + message_data = json.loads(data) + message_data["sender_id"] = client_id + + # Handle message through unified manager + await chat_manager.handle_incoming_message( + ChannelType.WEB_CHAT, message_data + ) + + except WebSocketDisconnect: + await web_adapter.remove_connection(client_id) + + +@app.post("/webhook/whatsapp") +async def whatsapp_webhook(request: dict): + """Handle WhatsApp webhook.""" + + return await chat_manager.handle_incoming_message( + ChannelType.WHATSAPP, request + ) + + +@app.get("/analytics/{customer_id}") +async def get_customer_analytics(customer_id: str): + """Get customer conversation analytics.""" + + return chat_manager.get_conversation_analytics(customer_id) + + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/samples/python/src/channels/whatsapp_integration.py b/samples/python/src/channels/whatsapp_integration.py new file mode 100644 index 00000000..5e1cc0fd --- /dev/null +++ b/samples/python/src/channels/whatsapp_integration.py @@ -0,0 +1,430 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""WhatsApp Business API integration for the AI Shopping Agent. + +This module provides a seamless integration between WhatsApp Business API +and the AP2 shopping agent, enabling customers to shop through WhatsApp chat. +""" + +import asyncio +import json +import logging +from datetime import datetime, timezone +from typing import Any, Dict, List, Optional + +import aiohttp +from fastapi import FastAPI, HTTPException, Request +from pydantic import BaseModel, Field + +from roles.shopping_agent.agent import root_agent +from common.system_utils import get_env_var + +logger = logging.getLogger(__name__) + + +class WhatsAppMessage(BaseModel): + """WhatsApp message structure.""" + + from_number: str = Field(..., alias="from") + to_number: str = Field(..., alias="to") + message_type: str = Field(..., alias="type") + text: Optional[str] = None + media_url: Optional[str] = None + media_type: Optional[str] = None + timestamp: datetime + message_id: str + + +class WhatsAppContact(BaseModel): + """WhatsApp contact information.""" + + phone_number: str + name: Optional[str] = None + profile_name: Optional[str] = None + + +class WhatsAppWebhookEvent(BaseModel): + """WhatsApp webhook event structure.""" + + entry: List[Dict[str, Any]] + object: str + + +class CustomerSession: + """Manages individual customer shopping sessions.""" + + def __init__(self, phone_number: str): + self.phone_number = phone_number + self.session_id = f"whatsapp_{phone_number}_{int(datetime.now().timestamp())}" + self.conversation_history: List[Dict[str, Any]] = [] + self.shopping_context: Dict[str, Any] = {} + self.last_activity = datetime.now(timezone.utc) + self.cart_items: List[Dict[str, Any]] = [] + self.customer_preferences: Dict[str, Any] = {} + + def add_message(self, message: str, sender: str): + """Add a message to conversation history.""" + self.conversation_history.append({ + "timestamp": datetime.now(timezone.utc).isoformat(), + "sender": sender, + "message": message + }) + self.last_activity = datetime.now(timezone.utc) + + +class WhatsAppShoppingAgent: + """Main WhatsApp shopping agent integration.""" + + def __init__(self): + self.whatsapp_token = get_env_var("WHATSAPP_BUSINESS_TOKEN") + self.whatsapp_phone_id = get_env_var("WHATSAPP_PHONE_NUMBER_ID") + self.webhook_verify_token = get_env_var("WHATSAPP_WEBHOOK_VERIFY_TOKEN") + self.base_url = f"https://graph.facebook.com/v18.0/{self.whatsapp_phone_id}" + + # Session management + self.active_sessions: Dict[str, CustomerSession] = {} + self.session_timeout = 3600 # 1 hour timeout + + # Agent integration + self.shopping_agent = root_agent + + async def send_whatsapp_message( + self, + to_number: str, + message: str, + message_type: str = "text" + ) -> bool: + """Send a message via WhatsApp Business API.""" + + headers = { + "Authorization": f"Bearer {self.whatsapp_token}", + "Content-Type": "application/json" + } + + payload = { + "messaging_product": "whatsapp", + "to": to_number, + "type": message_type, + "text": {"body": message} + } + + try: + async with aiohttp.ClientSession() as session: + async with session.post( + f"{self.base_url}/messages", + headers=headers, + json=payload + ) as response: + if response.status == 200: + logger.info(f"Message sent successfully to {to_number}") + return True + else: + logger.error(f"Failed to send message: {await response.text()}") + return False + except Exception as e: + logger.error(f"Error sending WhatsApp message: {e}") + return False + + async def send_interactive_message( + self, + to_number: str, + header: str, + body: str, + buttons: List[Dict[str, str]] + ) -> bool: + """Send an interactive message with buttons.""" + + headers = { + "Authorization": f"Bearer {self.whatsapp_token}", + "Content-Type": "application/json" + } + + interactive_buttons = [] + for i, button in enumerate(buttons): + interactive_buttons.append({ + "type": "reply", + "reply": { + "id": f"btn_{i}", + "title": button["title"] + } + }) + + payload = { + "messaging_product": "whatsapp", + "to": to_number, + "type": "interactive", + "interactive": { + "type": "button", + "header": {"type": "text", "text": header}, + "body": {"text": body}, + "action": {"buttons": interactive_buttons} + } + } + + try: + async with aiohttp.ClientSession() as session: + async with session.post( + f"{self.base_url}/messages", + headers=headers, + json=payload + ) as response: + return response.status == 200 + except Exception as e: + logger.error(f"Error sending interactive message: {e}") + return False + + def get_or_create_session(self, phone_number: str) -> CustomerSession: + """Get existing session or create new one.""" + + # Clean up expired sessions + self._cleanup_expired_sessions() + + if phone_number not in self.active_sessions: + self.active_sessions[phone_number] = CustomerSession(phone_number) + logger.info(f"Created new session for {phone_number}") + + return self.active_sessions[phone_number] + + def _cleanup_expired_sessions(self): + """Remove expired sessions.""" + current_time = datetime.now(timezone.utc) + expired_sessions = [] + + for phone_number, session in self.active_sessions.items(): + time_diff = (current_time - session.last_activity).total_seconds() + if time_diff > self.session_timeout: + expired_sessions.append(phone_number) + + for phone_number in expired_sessions: + del self.active_sessions[phone_number] + logger.info(f"Expired session for {phone_number}") + + async def process_incoming_message(self, webhook_data: Dict[str, Any]) -> bool: + """Process incoming WhatsApp message.""" + + try: + for entry in webhook_data.get("entry", []): + for change in entry.get("changes", []): + if change.get("field") == "messages": + messages = change.get("value", {}).get("messages", []) + + for message in messages: + await self._handle_message(message) + + return True + + except Exception as e: + logger.error(f"Error processing webhook data: {e}") + return False + + async def _handle_message(self, message: Dict[str, Any]): + """Handle individual WhatsApp message.""" + + phone_number = message.get("from") + message_type = message.get("type") + timestamp = datetime.fromtimestamp(int(message.get("timestamp")), timezone.utc) + + if not phone_number: + logger.error("No phone number in message") + return + + # Get or create customer session + session = self.get_or_create_session(phone_number) + + # Extract message content + message_text = "" + if message_type == "text": + message_text = message.get("text", {}).get("body", "") + elif message_type == "interactive": + # Handle button responses + button_reply = message.get("interactive", {}).get("button_reply", {}) + message_text = button_reply.get("title", "") + + if not message_text: + await self.send_whatsapp_message( + phone_number, + "Sorry, I can only handle text messages right now. How can I help you shop today?" + ) + return + + # Add to conversation history + session.add_message(message_text, "customer") + + # Process with shopping agent + agent_response = await self._get_agent_response(session, message_text) + + # Send response back to customer + if agent_response: + await self.send_whatsapp_message(phone_number, agent_response) + session.add_message(agent_response, "agent") + + async def _get_agent_response(self, session: CustomerSession, message: str) -> str: + """Get response from the shopping agent.""" + + try: + # Prepare context for the agent + context = { + "customer_phone": session.phone_number, + "session_id": session.session_id, + "conversation_history": session.conversation_history[-5:], # Last 5 messages + "shopping_context": session.shopping_context, + "channel": "whatsapp" + } + + # TODO: Integrate with actual shopping agent + # For now, return a simple response + + # Check for common shopping intents + message_lower = message.lower() + + if any(word in message_lower for word in ["hi", "hello", "hey", "start"]): + return ("๐Ÿ‘‹ Hi! I'm your AI shopping assistant. I can help you find products, " + "compare prices, create bundles, and complete your purchase right here in WhatsApp!\n\n" + "What are you looking to buy today? For example:\n" + "โ€ข 'I need a new phone'\n" + "โ€ข 'Show me winter jackets'\n" + "โ€ข 'Find me a laptop under $1000'") + + elif any(word in message_lower for word in ["buy", "shop", "looking for", "need", "want"]): + # Extract product intent + return (f"Great! I'll help you find '{message}'. " + f"Let me search our catalog for the best options...\n\n" + f"๐Ÿ” *Searching for products...*\n\n" + f"I found several great options! Would you like me to:\n" + f"1๏ธโƒฃ Show you the top 3 recommendations\n" + f"2๏ธโƒฃ Filter by price range\n" + f"3๏ธโƒฃ Show bundle deals\n\n" + f"Just reply with 1, 2, or 3!") + + elif message_lower in ["1", "2", "3"]: + if message_lower == "1": + return self._generate_product_recommendations() + elif message_lower == "2": + return "๐Ÿ’ฐ What's your budget range?\n\nโ€ข Under $50\nโ€ข $50-$200\nโ€ข $200-$500\nโ€ข Over $500\n\nJust tell me your range!" + elif message_lower == "3": + return self._generate_bundle_offers() + + else: + return ("I understand you're interested in shopping! Let me help you find what you need. " + "Could you tell me more specifically what you're looking for?") + + except Exception as e: + logger.error(f"Error getting agent response: {e}") + return "Sorry, I encountered an error. Please try again!" + + def _generate_product_recommendations(self) -> str: + """Generate mock product recommendations with negotiation options.""" + return ( + "๐Ÿ›๏ธ *Top 3 Recommendations:*\n\n" + + "1๏ธโƒฃ **Premium Wireless Headphones**\n" + "๐Ÿ’ฐ $199.99 ~~$249.99~~\n" + "โญ 4.8/5 stars | Free shipping\n" + "๐ŸŽต Noise cancelling, 30hr battery\n\n" + + "2๏ธโƒฃ **Smart Fitness Watch**\n" + "๐Ÿ’ฐ $299.99\n" + "โญ 4.6/5 stars | 2-day delivery\n" + "โค๏ธ Heart rate, GPS, waterproof\n\n" + + "3๏ธโƒฃ **Bluetooth Speaker Bundle**\n" + "๐Ÿ’ฐ $89.99 for 2 speakers!\n" + "โญ 4.7/5 stars | Limited time offer\n" + "๐Ÿ”Š 360ยฐ sound, 20hr battery each\n\n" + + "๐Ÿ’ฌ Reply with the number to select, or:\n" + "๐Ÿ’ธ 'Negotiate 1' to discuss pricing\n" + "๐Ÿ“ฆ 'Bundle deal' for combo offers\n" + "๐Ÿ”„ 'More options' to see alternatives" + ) + + def _generate_bundle_offers(self) -> str: + """Generate bundle offers with negotiation.""" + return ( + "๐ŸŽ *Special Bundle Deals:*\n\n" + + "๐Ÿ“ฑ **Tech Bundle** - Save $100!\n" + "โ€ข Wireless Headphones\n" + "โ€ข Phone Case\n" + "โ€ข Wireless Charger\n" + "๐Ÿ’ฐ $179.99 (was $279.99)\n\n" + + "๐Ÿƒ **Fitness Bundle** - Save $75!\n" + "โ€ข Fitness Watch\n" + "โ€ข Bluetooth Earbuds\n" + "โ€ข Gym Bag\n" + "๐Ÿ’ฐ $324.99 (was $399.99)\n\n" + + "๐Ÿ  **Home Audio Bundle** - Save $50!\n" + "โ€ข 2x Bluetooth Speakers\n" + "โ€ข Smart Display\n" + "โ€ข Streaming Device\n" + "๐Ÿ’ฐ $249.99 (was $299.99)\n\n" + + "๐Ÿ’ฌ Interested? Reply:\n" + "โ€ข Bundle name to select\n" + "โ€ข 'Custom bundle' to create your own\n" + "โ€ข 'Negotiate' + bundle name to discuss pricing" + ) + + +# FastAPI app for webhook handling +app = FastAPI(title="WhatsApp Shopping Agent", version="1.0.0") +whatsapp_agent = WhatsAppShoppingAgent() + + +@app.get("/webhook") +async def verify_webhook(request: Request): + """Verify WhatsApp webhook.""" + hub_mode = request.query_params.get("hub.mode") + hub_token = request.query_params.get("hub.verify_token") + hub_challenge = request.query_params.get("hub.challenge") + + if hub_mode == "subscribe" and hub_token == whatsapp_agent.webhook_verify_token: + logger.info("Webhook verified successfully") + return int(hub_challenge) + else: + logger.error("Webhook verification failed") + raise HTTPException(status_code=403, detail="Forbidden") + + +@app.post("/webhook") +async def handle_webhook(request: Request): + """Handle incoming WhatsApp messages.""" + try: + body = await request.json() + logger.info(f"Received webhook: {json.dumps(body, indent=2)}") + + success = await whatsapp_agent.process_incoming_message(body) + + if success: + return {"status": "ok"} + else: + raise HTTPException(status_code=500, detail="Processing failed") + + except Exception as e: + logger.error(f"Webhook handling error: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@app.get("/health") +async def health_check(): + """Health check endpoint.""" + return {"status": "healthy", "service": "WhatsApp Shopping Agent"} + + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/samples/python/src/optimization/checkout_optimizer.py b/samples/python/src/optimization/checkout_optimizer.py new file mode 100644 index 00000000..651dc8d8 --- /dev/null +++ b/samples/python/src/optimization/checkout_optimizer.py @@ -0,0 +1,1282 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Advanced Checkout Optimization and Cart Abandonment Recovery. + +This module provides intelligent checkout optimization, cart abandonment recovery, +one-click purchasing, and conversion optimization features to maximize sales. +""" + +import asyncio +import json +import logging +from datetime import datetime, timezone, timedelta +from typing import Any, Dict, List, Optional, Tuple +from dataclasses import dataclass +from enum import Enum +from decimal import Decimal, ROUND_HALF_UP + +import numpy as np +import aiohttp +from pydantic import BaseModel, Field +from google import genai + +from ap2.types.mandate import CartMandate +from ai_curation.negotiation_engine import negotiation_engine + +logger = logging.getLogger(__name__) + + +class CheckoutStage(Enum): + """Checkout process stages.""" + + CART_REVIEW = "cart_review" + SHIPPING_INFO = "shipping_info" + PAYMENT_METHOD = "payment_method" + CURRENCY_CONVERSION = "currency_conversion" + ORDER_CONFIRMATION = "order_confirmation" + PAYMENT_PROCESSING = "payment_processing" + SETTLEMENT_PROCESSING = "settlement_processing" + COMPLETED = "completed" + ABANDONED = "abandoned" + + +class PaymentStatus(Enum): + """Payment processing status.""" + + PENDING = "pending" + AUTHORIZED = "authorized" + CAPTURED = "captured" + SETTLED = "settled" + FAILED = "failed" + REFUNDED = "refunded" + DISPUTED = "disputed" + + +class CurrencyCode(Enum): + """Supported currency codes.""" + + USD = "USD" + EUR = "EUR" + GBP = "GBP" + JPY = "JPY" + CAD = "CAD" + AUD = "AUD" + CHF = "CHF" + CNY = "CNY" + INR = "INR" + BRL = "BRL" + + +class AbandonmentReason(Enum): + """Common cart abandonment reasons.""" + + HIGH_PRICE = "high_price" + UNEXPECTED_COSTS = "unexpected_costs" + COMPLEX_CHECKOUT = "complex_checkout" + SECURITY_CONCERNS = "security_concerns" + COMPARISON_SHOPPING = "comparison_shopping" + NO_URGENCY = "no_urgency" + TECHNICAL_ISSUES = "technical_issues" + CHANGED_MIND = "changed_mind" + CURRENCY_CONCERNS = "currency_concerns" + PAYMENT_FAILED = "payment_failed" + + +@dataclass +class PaymentDetails: + """Payment processing details.""" + + payment_id: str + amount: Decimal + original_currency: CurrencyCode + customer_currency: CurrencyCode + converted_amount: Decimal + exchange_rate: Decimal + payment_method: str + status: PaymentStatus + + # Transaction details + authorization_id: Optional[str] = None + capture_id: Optional[str] = None + settlement_id: Optional[str] = None + + # Timestamps + initiated_at: Optional[datetime] = None + authorized_at: Optional[datetime] = None + captured_at: Optional[datetime] = None + settled_at: Optional[datetime] = None + + # Fees and processing + processing_fee: Decimal = Decimal('0.00') + currency_conversion_fee: Decimal = Decimal('0.00') + settlement_fee: Decimal = Decimal('0.00') + + # Error handling + error_code: Optional[str] = None + error_message: Optional[str] = None + retry_count: int = 0 + + +@dataclass +class SettlementDetails: + """Settlement processing details.""" + + settlement_id: str + payment_id: str + merchant_account: str + settlement_amount: Decimal + settlement_currency: CurrencyCode + settlement_date: datetime + + # Batch processing + batch_id: Optional[str] = None + batch_sequence: Optional[int] = None + + # Fees breakdown + processing_fees: Decimal = Decimal('0.00') + network_fees: Decimal = Decimal('0.00') + fx_fees: Decimal = Decimal('0.00') + + # Status tracking + status: str = "pending" + confirmation_number: Optional[str] = None + + +@dataclass +class CheckoutSession: + """Tracks individual checkout session.""" + + session_id: str + customer_id: str + cart_items: List[Dict[str, Any]] + current_stage: CheckoutStage + start_time: datetime + last_activity: datetime + + # Progress tracking + stages_completed: List[CheckoutStage] + time_per_stage: Dict[CheckoutStage, float] + + # Customer behavior + hesitation_points: List[Dict[str, Any]] + support_requests: List[str] + price_sensitivity_signals: List[str] + + # Optimization data + applied_incentives: List[Dict[str, Any]] + recovery_attempts: int + conversion_probability: float + + # Payment processing + payment_details: Optional[PaymentDetails] = None + settlement_details: Optional[SettlementDetails] = None + + # Currency preferences + customer_currency: Optional[CurrencyCode] = None + detected_location: Optional[str] = None + currency_auto_detected: bool = False + + +class ConversionOptimizer: + """Optimizes checkout conversion rates with automatic payment processing.""" + + def __init__(self): + self.llm_client = genai.Client() + self.active_sessions: Dict[str, CheckoutSession] = {} + self.abandonment_patterns: Dict[str, List[Dict[str, Any]]] = {} + + # Payment processing + self.payment_processors = self._initialize_payment_processors() + self.currency_service = CurrencyConversionService() + self.settlement_service = SettlementService() + + # Recovery strategies + self.recovery_strategies = self._initialize_recovery_strategies() + + # Start background tasks + asyncio.create_task(self._monitor_sessions()) + asyncio.create_task(self._process_recovery_queue()) + asyncio.create_task(self._process_settlements()) + + def _initialize_payment_processors(self) -> Dict[str, Any]: + """Initialize payment processing configurations.""" + + return { + "stripe": { + "api_key": "sk_test_...", # From environment + "webhook_secret": "whsec_...", + "supported_currencies": ["USD", "EUR", "GBP", "CAD", "AUD"], + "fees": { + "domestic": Decimal("0.029"), # 2.9% + "international": Decimal("0.039"), # 3.9% + "currency_conversion": Decimal("0.01") # 1% + } + }, + "paypal": { + "client_id": "...", + "client_secret": "...", + "supported_currencies": ["USD", "EUR", "GBP", "JPY", "CAD"], + "fees": { + "domestic": Decimal("0.0349"), # 3.49% + "international": Decimal("0.0499"), # 4.99% + "currency_conversion": Decimal("0.025") # 2.5% + } + }, + "ap2": { + "merchant_id": "...", + "api_endpoint": "https://ap2.googleapis.com/v1", + "supported_currencies": ["USD", "EUR", "GBP", "JPY", "CAD", "AUD"], + "fees": { + "domestic": Decimal("0.015"), # 1.5% + "international": Decimal("0.025"), # 2.5% + "currency_conversion": Decimal("0.005") # 0.5% + } + } + } + + def _initialize_recovery_strategies(self) -> List[Dict[str, Any]]: + """Initialize cart abandonment recovery strategies.""" + + return [ + { + "name": "immediate_discount", + "trigger_delay": 30, # seconds + "conditions": ["price_sensitivity_high"], + "action": "offer_discount", + "discount_percentage": 10, + "message": "Wait! I can offer you 10% off if you complete your purchase in the next 15 minutes!" + }, + { + "name": "free_shipping", + "trigger_delay": 60, + "conditions": ["unexpected_costs"], + "action": "offer_free_shipping", + "message": "I noticed shipping costs might be a concern. Let me offer you free shipping on this order!" + }, + { + "name": "limited_time_offer", + "trigger_delay": 120, + "conditions": ["no_urgency"], + "action": "create_urgency", + "message": "๐Ÿ”ฅ This item is popular! Only 3 left in stock. Secure yours now before it's gone!" + }, + { + "name": "customer_support", + "trigger_delay": 180, + "conditions": ["technical_issues", "security_concerns"], + "action": "offer_support", + "message": "Having trouble with checkout? I'm here to help! Let me guide you through the process." + }, + { + "name": "social_proof", + "trigger_delay": 240, + "conditions": ["comparison_shopping"], + "action": "show_social_proof", + "message": "๐Ÿ’ฏ Over 1,000 customers bought this item this month! Join them with our secure, trusted checkout." + }, + { + "name": "payment_options", + "trigger_delay": 300, + "conditions": ["payment_concerns"], + "action": "show_payment_options", + "message": "๐Ÿ’ณ Multiple secure payment options available: Credit card, PayPal, Apple Pay, and more!" + }, + { + "name": "currency_assistance", + "trigger_delay": 120, + "conditions": ["currency_concerns"], + "action": "offer_currency_help", + "message": "๐Ÿ’ฑ I can display prices in your local currency and handle automatic conversion at checkout!" + }, + { + "name": "payment_retry", + "trigger_delay": 60, + "conditions": ["payment_failed"], + "action": "offer_payment_retry", + "message": "๐Ÿ’ณ Payment didn't go through? Let me help you try a different payment method or resolve any issues." + } + ] + + # Payment Processing Methods + + async def detect_customer_currency(self, session_id: str, customer_ip: str = None) -> CurrencyCode: + """Auto-detect customer's preferred currency based on location.""" + + if session_id not in self.active_sessions: + return CurrencyCode.USD # Default fallback + + session = self.active_sessions[session_id] + + # Try to detect from IP geolocation + if customer_ip: + try: + async with aiohttp.ClientSession() as http_session: + async with http_session.get(f"http://ip-api.com/json/{customer_ip}") as response: + if response.status == 200: + geo_data = await response.json() + country_code = geo_data.get("countryCode", "US") + + # Map country to currency + currency_map = { + "US": CurrencyCode.USD, + "GB": CurrencyCode.GBP, + "DE": CurrencyCode.EUR, "FR": CurrencyCode.EUR, "IT": CurrencyCode.EUR, + "JP": CurrencyCode.JPY, + "CA": CurrencyCode.CAD, + "AU": CurrencyCode.AUD, + "CH": CurrencyCode.CHF, + "CN": CurrencyCode.CNY, + "IN": CurrencyCode.INR, + "BR": CurrencyCode.BRL + } + + detected_currency = currency_map.get(country_code, CurrencyCode.USD) + session.customer_currency = detected_currency + session.detected_location = country_code + session.currency_auto_detected = True + + logger.info(f"Auto-detected currency {detected_currency.value} for session {session_id}") + return detected_currency + + except Exception as e: + logger.warning(f"Failed to detect currency from IP: {e}") + + # Fallback to USD + session.customer_currency = CurrencyCode.USD + return CurrencyCode.USD + + async def initiate_payment( + self, + session_id: str, + payment_method: str = "ap2", + amount: Decimal = None, + original_currency: CurrencyCode = CurrencyCode.USD + ) -> PaymentDetails: + """Initiate payment processing with automatic currency conversion.""" + + if session_id not in self.active_sessions: + raise ValueError(f"Session {session_id} not found") + + session = self.active_sessions[session_id] + + # Calculate total amount if not provided + if amount is None: + amount = Decimal(str(sum(item.get("price", 0) for item in session.cart_items))) + + # Detect customer currency if not set + if not session.customer_currency: + await self.detect_customer_currency(session_id) + + customer_currency = session.customer_currency or CurrencyCode.USD + + # Get exchange rate and convert amount + exchange_rate, converted_amount, conversion_fee = await self.currency_service.convert_currency( + amount, original_currency, customer_currency + ) + + # Generate payment ID + payment_id = f"pay_{session_id}_{int(datetime.now().timestamp())}" + + # Create payment details + payment_details = PaymentDetails( + payment_id=payment_id, + amount=amount, + original_currency=original_currency, + customer_currency=customer_currency, + converted_amount=converted_amount, + exchange_rate=exchange_rate, + payment_method=payment_method, + status=PaymentStatus.PENDING, + initiated_at=datetime.now(timezone.utc), + currency_conversion_fee=conversion_fee + ) + + # Store payment details + session.payment_details = payment_details + session.current_stage = CheckoutStage.PAYMENT_PROCESSING + + # Process payment based on method + try: + if payment_method == "ap2": + await self._process_ap2_payment(payment_details) + elif payment_method == "stripe": + await self._process_stripe_payment(payment_details) + elif payment_method == "paypal": + await self._process_paypal_payment(payment_details) + else: + raise ValueError(f"Unsupported payment method: {payment_method}") + + logger.info(f"Payment initiated for session {session_id}: {payment_id}") + + except Exception as e: + payment_details.status = PaymentStatus.FAILED + payment_details.error_message = str(e) + logger.error(f"Payment failed for session {session_id}: {e}") + raise + + return payment_details + + async def start_checkout_session( + self, + customer_id: str, + cart_items: List[Dict[str, Any]] + ) -> str: + """Start a new checkout session.""" + + session_id = f"checkout_{customer_id}_{int(datetime.now().timestamp())}" + + session = CheckoutSession( + session_id=session_id, + customer_id=customer_id, + cart_items=cart_items, + current_stage=CheckoutStage.CART_REVIEW, + start_time=datetime.now(timezone.utc), + last_activity=datetime.now(timezone.utc), + stages_completed=[], + time_per_stage={}, + hesitation_points=[], + support_requests=[], + price_sensitivity_signals=[], + applied_incentives=[], + recovery_attempts=0, + conversion_probability=self._calculate_initial_conversion_probability(customer_id, cart_items) + ) + + self.active_sessions[session_id] = session + + logger.info(f"Started checkout session {session_id} for customer {customer_id}") + + return session_id + + def _calculate_initial_conversion_probability( + self, + customer_id: str, + cart_items: List[Dict[str, Any]] + ) -> float: + """Calculate initial conversion probability.""" + + base_probability = 0.7 # 70% base conversion rate + + # Adjust based on cart value + cart_value = sum(item.get("price", 0) for item in cart_items) + if cart_value > 500: + base_probability -= 0.1 # Higher cart value = slight hesitation + elif cart_value < 50: + base_probability += 0.1 # Low cart value = easier decision + + # Adjust based on customer history (mock) + # In production, this would use real customer data + customer_history_score = 0.8 # Mock score + base_probability = (base_probability + customer_history_score) / 2 + + return max(0.1, min(0.9, base_probability)) + + async def track_checkout_progress( + self, + session_id: str, + stage: CheckoutStage, + metadata: Optional[Dict[str, Any]] = None + ): + """Track checkout progress and detect potential issues.""" + + if session_id not in self.active_sessions: + logger.warning(f"Session {session_id} not found") + return + + session = self.active_sessions[session_id] + previous_stage = session.current_stage + + # Calculate time spent in previous stage + if previous_stage not in session.time_per_stage: + session.time_per_stage[previous_stage] = 0 + + time_spent = (datetime.now(timezone.utc) - session.last_activity).total_seconds() + session.time_per_stage[previous_stage] += time_spent + + # Update session + session.current_stage = stage + session.last_activity = datetime.now(timezone.utc) + + if stage not in session.stages_completed: + session.stages_completed.append(stage) + + # Detect hesitation patterns + await self._detect_hesitation(session, previous_stage, time_spent, metadata) + + # Update conversion probability + session.conversion_probability = await self._update_conversion_probability(session) + + logger.info(f"Session {session_id} progressed to {stage.value}") + + async def _detect_hesitation( + self, + session: CheckoutSession, + stage: CheckoutStage, + time_spent: float, + metadata: Optional[Dict[str, Any]] + ): + """Detect customer hesitation patterns.""" + + # Time-based hesitation detection + stage_thresholds = { + CheckoutStage.CART_REVIEW: 120, # 2 minutes + CheckoutStage.SHIPPING_INFO: 180, # 3 minutes + CheckoutStage.PAYMENT_METHOD: 240, # 4 minutes + CheckoutStage.ORDER_CONFIRMATION: 60 # 1 minute + } + + if stage in stage_thresholds and time_spent > stage_thresholds[stage]: + hesitation_point = { + "stage": stage.value, + "time_spent": time_spent, + "timestamp": datetime.now(timezone.utc), + "reason": "excessive_time", + "metadata": metadata or {} + } + session.hesitation_points.append(hesitation_point) + + # Trigger intervention + await self._trigger_intervention(session, hesitation_point) + + async def _trigger_intervention( + self, + session: CheckoutSession, + hesitation_point: Dict[str, Any] + ): + """Trigger appropriate intervention for hesitation.""" + + # Find suitable recovery strategy + suitable_strategies = [] + + for strategy in self.recovery_strategies: + if self._strategy_matches_situation(strategy, session, hesitation_point): + suitable_strategies.append(strategy) + + if suitable_strategies: + # Use most relevant strategy + best_strategy = max( + suitable_strategies, + key=lambda s: self._calculate_strategy_relevance(s, session) + ) + + await self._apply_recovery_strategy(session, best_strategy) + + def _strategy_matches_situation( + self, + strategy: Dict[str, Any], + session: CheckoutSession, + hesitation_point: Dict[str, Any] + ) -> bool: + """Check if strategy matches current situation.""" + + conditions = strategy.get("conditions", []) + + # Check various conditions + if "price_sensitivity_high" in conditions: + return len(session.price_sensitivity_signals) > 0 + + if "unexpected_costs" in conditions: + return hesitation_point["stage"] == CheckoutStage.SHIPPING_INFO.value + + if "technical_issues" in conditions: + return len(session.support_requests) > 0 + + if "no_urgency" in conditions: + cart_value = sum(item.get("price", 0) for item in session.cart_items) + return cart_value < 200 + + return True # Default: strategy applies + + def _calculate_strategy_relevance( + self, + strategy: Dict[str, Any], + session: CheckoutSession + ) -> float: + """Calculate how relevant a strategy is for this session.""" + + relevance = 0.5 # Base relevance + + # Higher relevance for fewer recovery attempts + if session.recovery_attempts == 0: + relevance += 0.3 + elif session.recovery_attempts == 1: + relevance += 0.1 + else: + relevance -= 0.2 + + # Adjust based on conversion probability + if session.conversion_probability < 0.5: + relevance += 0.2 # More aggressive for low probability + + return max(0.0, min(1.0, relevance)) + + async def _apply_recovery_strategy( + self, + session: CheckoutSession, + strategy: Dict[str, Any] + ): + """Apply recovery strategy to prevent abandonment.""" + + incentive = { + "strategy_name": strategy["name"], + "applied_at": datetime.now(timezone.utc), + "stage": session.current_stage.value, + "message": strategy["message"] + } + + # Apply specific actions + action = strategy.get("action") + + if action == "offer_discount": + discount_percentage = strategy.get("discount_percentage", 10) + incentive["discount_percentage"] = discount_percentage + incentive["discount_amount"] = self._calculate_discount_amount(session, discount_percentage) + + elif action == "offer_free_shipping": + incentive["free_shipping"] = True + incentive["shipping_savings"] = 15 # Mock shipping cost + + elif action == "create_urgency": + incentive["urgency_message"] = True + incentive["stock_level"] = "low" + + session.applied_incentives.append(incentive) + session.recovery_attempts += 1 + + # Send intervention message (would integrate with chat system) + await self._send_intervention_message(session, incentive) + + logger.info(f"Applied recovery strategy '{strategy['name']}' to session {session.session_id}") + + def _calculate_discount_amount(self, session: CheckoutSession, percentage: float) -> float: + """Calculate discount amount for cart.""" + + cart_total = sum(item.get("price", 0) for item in session.cart_items) + return cart_total * (percentage / 100) + + async def _send_intervention_message( + self, + session: CheckoutSession, + incentive: Dict[str, Any] + ): + """Send intervention message to customer.""" + + # This would integrate with the unified chat manager + message = incentive["message"] + + # Add specific details based on incentive type + if "discount_percentage" in incentive: + savings = incentive["discount_amount"] + message += f" You'll save ${savings:.2f}!" + + if "free_shipping" in incentive: + message += f" Plus free shipping (save ${incentive['shipping_savings']})!" + + # Mock sending message + logger.info(f"Intervention message for {session.customer_id}: {message}") + + # In production, this would send through the chat system: + # await chat_manager.send_message(session.customer_id, message) + + async def _update_conversion_probability(self, session: CheckoutSession) -> float: + """Update conversion probability based on session progress.""" + + base_probability = session.conversion_probability + + # Positive factors + stages_completed_count = len(session.stages_completed) + stage_completion_bonus = stages_completed_count * 0.05 + + # Negative factors + hesitation_penalty = len(session.hesitation_points) * 0.1 + time_penalty = max(0, (datetime.now(timezone.utc) - session.start_time).total_seconds() / 3600 - 0.5) * 0.1 + + # Recovery factor + recovery_bonus = len(session.applied_incentives) * 0.05 + + new_probability = base_probability + stage_completion_bonus + recovery_bonus - hesitation_penalty - time_penalty + + return max(0.1, min(0.9, new_probability)) + + async def handle_abandonment(self, session_id: str, reason: AbandonmentReason): + """Handle cart abandonment with follow-up strategy.""" + + if session_id not in self.active_sessions: + return + + session = self.active_sessions[session_id] + session.current_stage = CheckoutStage.ABANDONED + + # Record abandonment pattern + abandonment_data = { + "customer_id": session.customer_id, + "reason": reason.value, + "stage": session.current_stage.value, + "cart_value": sum(item.get("price", 0) for item in session.cart_items), + "time_to_abandonment": (datetime.now(timezone.utc) - session.start_time).total_seconds(), + "hesitation_points": len(session.hesitation_points), + "recovery_attempts": session.recovery_attempts + } + + if session.customer_id not in self.abandonment_patterns: + self.abandonment_patterns[session.customer_id] = [] + + self.abandonment_patterns[session.customer_id].append(abandonment_data) + + # Schedule follow-up recovery + await self._schedule_follow_up_recovery(session, reason) + + logger.info(f"Session {session_id} abandoned at {reason.value}") + + async def _schedule_follow_up_recovery( + self, + session: CheckoutSession, + reason: AbandonmentReason + ): + """Schedule follow-up recovery messages.""" + + # Immediate follow-up (5 minutes) + asyncio.create_task( + self._delayed_recovery_message( + session, 300, "immediate_followup" + ) + ) + + # Short-term follow-up (2 hours) + asyncio.create_task( + self._delayed_recovery_message( + session, 7200, "short_term_followup" + ) + ) + + # Long-term follow-up (24 hours) + asyncio.create_task( + self._delayed_recovery_message( + session, 86400, "long_term_followup" + ) + ) + + async def _delayed_recovery_message( + self, + session: CheckoutSession, + delay_seconds: int, + recovery_type: str + ): + """Send delayed recovery message.""" + + await asyncio.sleep(delay_seconds) + + # Check if customer hasn't completed purchase elsewhere + if session.current_stage == CheckoutStage.ABANDONED: + message = await self._generate_recovery_message(session, recovery_type) + await self._send_intervention_message(session, {"message": message}) + + async def _generate_recovery_message( + self, + session: CheckoutSession, + recovery_type: str + ) -> str: + """Generate personalized recovery message.""" + + cart_items = [item.get("name", "item") for item in session.cart_items] + cart_value = sum(item.get("price", 0) for item in session.cart_items) + + if recovery_type == "immediate_followup": + return (f"Hi! I noticed you were interested in {', '.join(cart_items[:2])}. " + f"Your cart is still saved. Would you like to complete your purchase?") + + elif recovery_type == "short_term_followup": + return (f"๐Ÿ›๏ธ Your cart (${cart_value:.2f}) is waiting! " + f"Complete your purchase now and get free shipping. " + f"Items are selling fast!") + + elif recovery_type == "long_term_followup": + return (f"๐Ÿ’Ž Special offer just for you! " + f"Come back and get 15% off your saved cart. " + f"This exclusive offer expires in 48 hours.") + + return "Your cart is still waiting for you!" + + async def enable_one_click_purchase(self, customer_id: str) -> Dict[str, Any]: + """Enable one-click purchasing for returning customers.""" + + # Mock customer payment profile + payment_profile = { + "customer_id": customer_id, + "has_saved_payment": True, + "has_saved_address": True, + "preferred_payment_method": "credit_card_ending_1234", + "default_shipping_address": { + "street": "123 Main St", + "city": "Anytown", + "state": "CA", + "zip": "12345" + }, + "one_click_enabled": True + } + + return payment_profile + + async def process_one_click_purchase( + self, + customer_id: str, + cart_items: List[Dict[str, Any]] + ) -> Dict[str, Any]: + """Process one-click purchase.""" + + # Simulate instant checkout + order_id = f"order_{int(datetime.now().timestamp())}" + + result = { + "order_id": order_id, + "customer_id": customer_id, + "items": cart_items, + "total": sum(item.get("price", 0) for item in cart_items), + "status": "completed", + "processing_time": 0.5, # 500ms + "payment_method": "saved_card_1234", + "shipping_address": "default", + "estimated_delivery": (datetime.now() + timedelta(days=2)).strftime("%Y-%m-%d") + } + + logger.info(f"One-click purchase completed: {order_id}") + + return result + + async def _monitor_sessions(self): + """Monitor active sessions for abandonment signs.""" + + while True: + try: + current_time = datetime.now(timezone.utc) + + for session_id, session in list(self.active_sessions.items()): + # Check for session timeout + time_since_activity = (current_time - session.last_activity).total_seconds() + + if time_since_activity > 1800: # 30 minutes + await self.handle_abandonment(session_id, AbandonmentReason.NO_URGENCY) + del self.active_sessions[session_id] + + await asyncio.sleep(60) # Check every minute + + except Exception as e: + logger.error(f"Error monitoring sessions: {e}") + await asyncio.sleep(60) + + async def _process_recovery_queue(self): + """Process recovery actions queue.""" + + while True: + try: + # Process any queued recovery actions + await asyncio.sleep(30) + + except Exception as e: + logger.error(f"Error processing recovery queue: {e}") + await asyncio.sleep(30) + + def get_optimization_analytics(self) -> Dict[str, Any]: + """Get checkout optimization analytics.""" + + total_sessions = len(self.active_sessions) + completed_sessions = sum( + 1 for s in self.active_sessions.values() + if s.current_stage == CheckoutStage.COMPLETED + ) + abandoned_sessions = sum( + 1 for s in self.active_sessions.values() + if s.current_stage == CheckoutStage.ABANDONED + ) + + return { + "total_sessions": total_sessions, + "completed_sessions": completed_sessions, + "abandoned_sessions": abandoned_sessions, + "conversion_rate": completed_sessions / max(1, total_sessions), + "abandonment_rate": abandoned_sessions / max(1, total_sessions), + "average_recovery_attempts": np.mean([ + s.recovery_attempts for s in self.active_sessions.values() + ]) if self.active_sessions else 0, + "total_interventions": sum( + len(s.applied_incentives) for s in self.active_sessions.values() + ) + } + + # Payment Processing Implementation Methods + + async def _process_ap2_payment(self, payment_details: PaymentDetails): + """Process payment using AP2 protocol.""" + + try: + # Create AP2 payment request + ap2_config = self.payment_processors["ap2"] + + payment_request = { + "merchant_id": ap2_config["merchant_id"], + "amount": str(payment_details.converted_amount), + "currency": payment_details.customer_currency.value, + "payment_id": payment_details.payment_id, + "description": f"Purchase via AI Shopping Agent" + } + + # Calculate processing fee + is_international = payment_details.original_currency != payment_details.customer_currency + fee_rate = ap2_config["fees"]["international" if is_international else "domestic"] + payment_details.processing_fee = payment_details.converted_amount * fee_rate + + # Mock AP2 API call (in production, use actual AP2 SDK) + # Mock successful authorization + payment_details.authorization_id = f"ap2_auth_{payment_details.payment_id}" + payment_details.status = PaymentStatus.AUTHORIZED + payment_details.authorized_at = datetime.now(timezone.utc) + + # Auto-capture for AP2 + await self._capture_payment(payment_details) + + except Exception as e: + payment_details.status = PaymentStatus.FAILED + payment_details.error_message = str(e) + raise + + async def _process_stripe_payment(self, payment_details: PaymentDetails): + """Process payment using Stripe.""" + + try: + stripe_config = self.payment_processors["stripe"] + + # Calculate fees + is_international = payment_details.original_currency != payment_details.customer_currency + fee_rate = stripe_config["fees"]["international" if is_international else "domestic"] + payment_details.processing_fee = payment_details.converted_amount * fee_rate + + # Mock successful authorization (in production, use Stripe SDK) + payment_details.authorization_id = f"stripe_auth_{payment_details.payment_id}" + payment_details.status = PaymentStatus.AUTHORIZED + payment_details.authorized_at = datetime.now(timezone.utc) + + # Auto-capture + await self._capture_payment(payment_details) + + except Exception as e: + payment_details.status = PaymentStatus.FAILED + payment_details.error_message = str(e) + raise + + async def _process_paypal_payment(self, payment_details: PaymentDetails): + """Process payment using PayPal.""" + + try: + paypal_config = self.payment_processors["paypal"] + + # Calculate fees + is_international = payment_details.original_currency != payment_details.customer_currency + fee_rate = paypal_config["fees"]["international" if is_international else "domestic"] + payment_details.processing_fee = payment_details.converted_amount * fee_rate + + # Mock successful authorization (in production, use PayPal SDK) + payment_details.authorization_id = f"paypal_auth_{payment_details.payment_id}" + payment_details.status = PaymentStatus.AUTHORIZED + payment_details.authorized_at = datetime.now(timezone.utc) + + # Auto-capture + await self._capture_payment(payment_details) + + except Exception as e: + payment_details.status = PaymentStatus.FAILED + payment_details.error_message = str(e) + raise + + async def _capture_payment(self, payment_details: PaymentDetails): + """Capture authorized payment.""" + + try: + # Mock capture process + payment_details.capture_id = f"capture_{payment_details.payment_id}" + payment_details.status = PaymentStatus.CAPTURED + payment_details.captured_at = datetime.now(timezone.utc) + + # Queue for settlement + await self._queue_for_settlement(payment_details) + + logger.info(f"Payment captured: {payment_details.payment_id}") + + except Exception as e: + payment_details.status = PaymentStatus.FAILED + payment_details.error_message = str(e) + raise + + async def _queue_for_settlement(self, payment_details: PaymentDetails): + """Queue payment for settlement processing.""" + + try: + settlement_id = f"settle_{payment_details.payment_id}" + + settlement_details = SettlementDetails( + settlement_id=settlement_id, + payment_id=payment_details.payment_id, + merchant_account="default", + settlement_amount=payment_details.converted_amount - payment_details.processing_fee, + settlement_currency=payment_details.customer_currency, + settlement_date=datetime.now(timezone.utc) + timedelta(days=1), # T+1 settlement + processing_fees=payment_details.processing_fee, + fx_fees=payment_details.currency_conversion_fee + ) + + # Store settlement details in session + for session in self.active_sessions.values(): + if (session.payment_details and + session.payment_details.payment_id == payment_details.payment_id): + session.settlement_details = settlement_details + session.current_stage = CheckoutStage.SETTLEMENT_PROCESSING + break + + # Queue in settlement service + await self.settlement_service.queue_settlement(settlement_details) + + logger.info(f"Queued for settlement: {settlement_id}") + + except Exception as e: + logger.error(f"Failed to queue settlement: {e}") + + async def _process_settlements(self): + """Background task to process settlements.""" + + while True: + try: + await self.settlement_service.process_pending_settlements() + await asyncio.sleep(3600) # Process every hour + + except Exception as e: + logger.error(f"Settlement processing error: {e}") + await asyncio.sleep(60) # Retry in 1 minute + + +class CurrencyConversionService: + """Handles currency conversion and exchange rates.""" + + def __init__(self): + self.exchange_rates_cache = {} + self.cache_expiry = {} + self.base_currency = CurrencyCode.USD + + async def convert_currency( + self, + amount: Decimal, + from_currency: CurrencyCode, + to_currency: CurrencyCode + ) -> Tuple[Decimal, Decimal, Decimal]: + """Convert currency and return (exchange_rate, converted_amount, conversion_fee).""" + + if from_currency == to_currency: + return Decimal('1.0'), amount, Decimal('0.00') + + try: + # Get exchange rate + exchange_rate = await self._get_exchange_rate(from_currency, to_currency) + + # Convert amount + converted_amount = (amount * exchange_rate).quantize( + Decimal('0.01'), rounding=ROUND_HALF_UP + ) + + # Calculate conversion fee (0.5% of converted amount) + conversion_fee = (converted_amount * Decimal('0.005')).quantize( + Decimal('0.01'), rounding=ROUND_HALF_UP + ) + + return exchange_rate, converted_amount, conversion_fee + + except Exception as e: + logger.error(f"Currency conversion failed: {e}") + # Return original amount as fallback + return Decimal('1.0'), amount, Decimal('0.00') + + async def _get_exchange_rate( + self, + from_currency: CurrencyCode, + to_currency: CurrencyCode + ) -> Decimal: + """Get current exchange rate between currencies.""" + + cache_key = f"{from_currency.value}_{to_currency.value}" + current_time = datetime.now(timezone.utc) + + # Check cache first + if (cache_key in self.exchange_rates_cache and + cache_key in self.cache_expiry and + current_time < self.cache_expiry[cache_key]): + return self.exchange_rates_cache[cache_key] + + try: + # In production, use real exchange rate API + # For demo, use mock rates + mock_rates = { + "USD_EUR": Decimal('0.85'), + "USD_GBP": Decimal('0.73'), + "USD_JPY": Decimal('110.0'), + "USD_CAD": Decimal('1.25'), + "USD_AUD": Decimal('1.35'), + "EUR_USD": Decimal('1.18'), + "GBP_USD": Decimal('1.37'), + "JPY_USD": Decimal('0.009'), + "CAD_USD": Decimal('0.80'), + "AUD_USD": Decimal('0.74') + } + + # Try direct rate + if cache_key in mock_rates: + rate = mock_rates[cache_key] + else: + # Try reverse rate + reverse_key = f"{to_currency.value}_{from_currency.value}" + if reverse_key in mock_rates: + rate = Decimal('1.0') / mock_rates[reverse_key] + else: + # Fallback to 1.0 + rate = Decimal('1.0') + + # Cache for 1 hour + self.exchange_rates_cache[cache_key] = rate + self.cache_expiry[cache_key] = current_time + timedelta(hours=1) + + return rate + + except Exception as e: + logger.error(f"Failed to get exchange rate: {e}") + return Decimal('1.0') + + +class SettlementService: + """Handles payment settlement processing.""" + + def __init__(self): + self.pending_settlements = [] + self.processed_settlements = [] + + async def queue_settlement(self, settlement_details: SettlementDetails): + """Queue a settlement for processing.""" + + self.pending_settlements.append(settlement_details) + logger.info(f"Settlement queued: {settlement_details.settlement_id}") + + async def process_pending_settlements(self): + """Process all pending settlements.""" + + if not self.pending_settlements: + return + + logger.info(f"Processing {len(self.pending_settlements)} pending settlements") + + # Process settlements that are due + current_time = datetime.now(timezone.utc) + + settlements_to_process = [ + s for s in self.pending_settlements + if s.settlement_date <= current_time + ] + + for settlement in settlements_to_process: + try: + await self._process_settlement(settlement) + self.pending_settlements.remove(settlement) + self.processed_settlements.append(settlement) + + except Exception as e: + logger.error(f"Settlement processing failed for {settlement.settlement_id}: {e}") + + async def _process_settlement(self, settlement: SettlementDetails): + """Process individual settlement.""" + + try: + # Mock settlement processing + settlement.status = "completed" + settlement.confirmation_number = f"conf_{settlement.settlement_id}" + + # In production, this would: + # 1. Transfer funds to merchant account + # 2. Generate settlement report + # 3. Send notifications + # 4. Update accounting records + + logger.info(f"Settlement processed: {settlement.settlement_id}") + + except Exception as e: + settlement.status = "failed" + logger.error(f"Settlement failed: {settlement.settlement_id} - {e}") + raise + + +# Global instance +checkout_optimizer = ConversionOptimizer() + + +# Usage Example for Enhanced Payment Processing +async def example_enhanced_checkout(): + """Example demonstrating enhanced payment processing with currency conversion.""" + + # Start checkout session + cart_items = [ + {"name": "Laptop", "price": 999.99, "currency": "USD"}, + {"name": "Mouse", "price": 29.99, "currency": "USD"} + ] + + session_id = await checkout_optimizer.start_checkout_session( + customer_id="customer_123", + cart_items=cart_items + ) + + # Auto-detect customer currency (mock IP from UK) + detected_currency = await checkout_optimizer.detect_customer_currency( + session_id, + customer_ip="81.2.69.142" # UK IP + ) + print(f"Detected customer currency: {detected_currency.value}") + + # Initiate payment with automatic currency conversion + try: + payment_details = await checkout_optimizer.initiate_payment( + session_id=session_id, + payment_method="ap2", + amount=Decimal("1029.98"), # Total cart value + original_currency=CurrencyCode.USD + ) + + print(f"Payment processed:") + print(f" Payment ID: {payment_details.payment_id}") + print(f" Original Amount: {payment_details.amount} {payment_details.original_currency.value}") + print(f" Converted Amount: {payment_details.converted_amount} {payment_details.customer_currency.value}") + print(f" Exchange Rate: {payment_details.exchange_rate}") + print(f" Processing Fee: {payment_details.processing_fee}") + print(f" Conversion Fee: {payment_details.currency_conversion_fee}") + print(f" Status: {payment_details.status.value}") + + # Check settlement details + session = checkout_optimizer.active_sessions[session_id] + if session.settlement_details: + settlement = session.settlement_details + print(f"\nSettlement Details:") + print(f" Settlement ID: {settlement.settlement_id}") + print(f" Settlement Amount: {settlement.settlement_amount} {settlement.settlement_currency.value}") + print(f" Settlement Date: {settlement.settlement_date}") + print(f" Status: {settlement.status}") + + except Exception as e: + print(f"Payment failed: {e}") + + +if __name__ == "__main__": + import asyncio + asyncio.run(example_enhanced_checkout()) \ No newline at end of file diff --git a/scripts/automation/cloud-deploy.sh b/scripts/automation/cloud-deploy.sh new file mode 100644 index 00000000..37c7ecb9 --- /dev/null +++ b/scripts/automation/cloud-deploy.sh @@ -0,0 +1,537 @@ +#!/bin/bash + +# Cloud Deployment Script for AI Shopping Concierge +# Supports AWS ECS, Google Cloud Run, and Azure Container Instances + +set -e + +# Configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_DIR="$(dirname "$(dirname "$SCRIPT_DIR")")" +DEPLOYMENT_CONFIG="$PROJECT_DIR/deployment/cloud-config.env" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { echo -e "${BLUE}[INFO]${NC} $1"; } +log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; } +log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +# Load configuration +if [[ -f "$DEPLOYMENT_CONFIG" ]]; then + source "$DEPLOYMENT_CONFIG" + log_info "Loaded configuration from $DEPLOYMENT_CONFIG" +else + log_warning "No configuration file found at $DEPLOYMENT_CONFIG" + log_info "Using default configuration values" +fi + +# Default values +CLOUD_PROVIDER=${CLOUD_PROVIDER:-"aws"} +REGION=${REGION:-"us-west-2"} +PROJECT_NAME=${PROJECT_NAME:-"ai-shopping-concierge"} +ENVIRONMENT=${ENVIRONMENT:-"production"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +DOMAIN_NAME=${DOMAIN_NAME:-""} + +# Display usage +usage() { + cat << EOF +Usage: $0 [OPTIONS] COMMAND + +Cloud deployment script for AI Shopping Concierge + +Commands: + deploy Deploy to cloud platform + destroy Remove deployment from cloud platform + status Check deployment status + logs View application logs + scale Scale the deployment + update Update the deployment with new image + rollback Rollback to previous deployment + +Options: + -p, --provider Cloud provider (aws|gcp|azure) [default: aws] + -r, --region Deployment region [default: us-west-2] + -e, --env Environment (dev|staging|production) [default: production] + -t, --tag Docker image tag [default: latest] + -d, --domain Domain name for the service + -h, --help Show this help message + +Examples: + $0 deploy + $0 --provider gcp --region us-central1 deploy + $0 --env staging scale 5 + $0 --tag v1.2.3 update +EOF +} + +# Parse command line arguments +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + -p|--provider) + CLOUD_PROVIDER="$2" + shift 2 + ;; + -r|--region) + REGION="$2" + shift 2 + ;; + -e|--env) + ENVIRONMENT="$2" + shift 2 + ;; + -t|--tag) + IMAGE_TAG="$2" + shift 2 + ;; + -d|--domain) + DOMAIN_NAME="$2" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + deploy|destroy|status|logs|scale|update|rollback) + COMMAND="$1" + shift + ;; + *) + log_error "Unknown option: $1" + usage + exit 1 + ;; + esac + done +} + +# Check prerequisites +check_prerequisites() { + log_info "Checking prerequisites for $CLOUD_PROVIDER deployment..." + + case $CLOUD_PROVIDER in + aws) + if ! command -v aws &> /dev/null; then + log_error "AWS CLI not found. Please install AWS CLI." + exit 1 + fi + if ! command -v ecs-cli &> /dev/null; then + log_warning "ECS CLI not found. Some features may not be available." + fi + ;; + gcp) + if ! command -v gcloud &> /dev/null; then + log_error "Google Cloud CLI not found. Please install gcloud." + exit 1 + fi + ;; + azure) + if ! command -v az &> /dev/null; then + log_error "Azure CLI not found. Please install Azure CLI." + exit 1 + fi + ;; + *) + log_error "Unsupported cloud provider: $CLOUD_PROVIDER" + exit 1 + ;; + esac + + if ! command -v docker &> /dev/null; then + log_error "Docker not found. Please install Docker." + exit 1 + fi + + log_success "Prerequisites check passed" +} + +# Build and push Docker image +build_and_push_image() { + log_info "Building and pushing Docker image..." + + local image_name="$PROJECT_NAME:$IMAGE_TAG" + local registry_url + + case $CLOUD_PROVIDER in + aws) + # Get AWS account ID and ECR registry URL + local aws_account_id=$(aws sts get-caller-identity --query Account --output text) + registry_url="$aws_account_id.dkr.ecr.$REGION.amazonaws.com" + + # Create ECR repository if it doesn't exist + aws ecr describe-repositories --repository-names "$PROJECT_NAME" --region "$REGION" &>/dev/null || \ + aws ecr create-repository --repository-name "$PROJECT_NAME" --region "$REGION" + + # Get ECR login token + aws ecr get-login-password --region "$REGION" | \ + docker login --username AWS --password-stdin "$registry_url" + ;; + gcp) + # Set up Google Container Registry + registry_url="gcr.io/$(gcloud config get-value project)" + gcloud auth configure-docker --quiet + ;; + azure) + # Set up Azure Container Registry + local acr_name="${PROJECT_NAME//-/}acr" + registry_url="$acr_name.azurecr.io" + + # Create ACR if it doesn't exist + az acr show --name "$acr_name" --resource-group "$PROJECT_NAME-rg" &>/dev/null || \ + az acr create --name "$acr_name" --resource-group "$PROJECT_NAME-rg" --sku Basic + + az acr login --name "$acr_name" + ;; + esac + + local full_image_name="$registry_url/$image_name" + + # Build and push image + docker build -t "$image_name" -t "$full_image_name" "$PROJECT_DIR" + docker push "$full_image_name" + + log_success "Image pushed to $full_image_name" + echo "$full_image_name" +} + +# Deploy to AWS ECS +deploy_aws() { + log_info "Deploying to AWS ECS..." + + local image_url=$(build_and_push_image) + local cluster_name="$PROJECT_NAME-$ENVIRONMENT" + local service_name="$PROJECT_NAME-service" + local task_family="$PROJECT_NAME-task" + + # Create ECS cluster + aws ecs create-cluster --cluster-name "$cluster_name" --region "$REGION" &>/dev/null || true + + # Create task definition + local task_definition=$(cat << EOF +{ + "family": "$task_family", + "networkMode": "awsvpc", + "requiresCompatibilities": ["FARGATE"], + "cpu": "1024", + "memory": "2048", + "executionRoleArn": "arn:aws:iam::$(aws sts get-caller-identity --query Account --output text):role/ecsTaskExecutionRole", + "taskRoleArn": "arn:aws:iam::$(aws sts get-caller-identity --query Account --output text):role/ecsTaskRole", + "containerDefinitions": [ + { + "name": "$PROJECT_NAME", + "image": "$image_url", + "portMappings": [ + { + "containerPort": 8000, + "protocol": "tcp" + } + ], + "environment": [ + {"name": "ENVIRONMENT", "value": "$ENVIRONMENT"}, + {"name": "DEBUG", "value": "false"} + ], + "secrets": [ + {"name": "DATABASE_URL", "valueFrom": "arn:aws:secretsmanager:$REGION:$(aws sts get-caller-identity --query Account --output text):secret:$PROJECT_NAME-db-url"}, + {"name": "REDIS_URL", "valueFrom": "arn:aws:secretsmanager:$REGION:$(aws sts get-caller-identity --query Account --output text):secret:$PROJECT_NAME-redis-url"} + ], + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "/ecs/$PROJECT_NAME", + "awslogs-region": "$REGION", + "awslogs-stream-prefix": "ecs" + } + } + } + ] +} +EOF +) + + # Register task definition + aws ecs register-task-definition --cli-input-json "$task_definition" --region "$REGION" + + # Create or update service + local service_exists=$(aws ecs describe-services --cluster "$cluster_name" --services "$service_name" --region "$REGION" --query 'services[0].status' --output text 2>/dev/null) + + if [[ "$service_exists" != "None" && "$service_exists" != "" ]]; then + log_info "Updating existing ECS service..." + aws ecs update-service \ + --cluster "$cluster_name" \ + --service "$service_name" \ + --task-definition "$task_family" \ + --region "$REGION" + else + log_info "Creating new ECS service..." + aws ecs create-service \ + --cluster "$cluster_name" \ + --service-name "$service_name" \ + --task-definition "$task_family" \ + --desired-count 2 \ + --launch-type FARGATE \ + --network-configuration "awsvpcConfiguration={subnets=[subnet-12345,subnet-67890],securityGroups=[sg-12345],assignPublicIp=ENABLED}" \ + --region "$REGION" + fi + + log_success "AWS ECS deployment completed" +} + +# Deploy to Google Cloud Run +deploy_gcp() { + log_info "Deploying to Google Cloud Run..." + + local image_url=$(build_and_push_image) + local service_name="$PROJECT_NAME-$ENVIRONMENT" + + # Deploy to Cloud Run + gcloud run deploy "$service_name" \ + --image "$image_url" \ + --platform managed \ + --region "$REGION" \ + --allow-unauthenticated \ + --port 8000 \ + --memory 2Gi \ + --cpu 2 \ + --max-instances 10 \ + --set-env-vars "ENVIRONMENT=$ENVIRONMENT,DEBUG=false" \ + --quiet + + # Get service URL + local service_url=$(gcloud run services describe "$service_name" --region "$REGION" --format 'value(status.url)') + + log_success "Google Cloud Run deployment completed" + log_info "Service URL: $service_url" +} + +# Deploy to Azure Container Instances +deploy_azure() { + log_info "Deploying to Azure Container Instances..." + + local image_url=$(build_and_push_image) + local resource_group="$PROJECT_NAME-rg" + local container_name="$PROJECT_NAME-$ENVIRONMENT" + + # Create resource group + az group create --name "$resource_group" --location "$REGION" &>/dev/null || true + + # Deploy container + az container create \ + --resource-group "$resource_group" \ + --name "$container_name" \ + --image "$image_url" \ + --cpu 2 \ + --memory 4 \ + --ports 8000 \ + --environment-variables "ENVIRONMENT=$ENVIRONMENT" "DEBUG=false" \ + --restart-policy Always \ + --location "$REGION" + + # Get container IP + local container_ip=$(az container show --resource-group "$resource_group" --name "$container_name" --query 'ipAddress.ip' --output tsv) + + log_success "Azure Container Instances deployment completed" + log_info "Container IP: $container_ip" +} + +# Main deployment function +deploy() { + check_prerequisites + + case $CLOUD_PROVIDER in + aws) + deploy_aws + ;; + gcp) + deploy_gcp + ;; + azure) + deploy_azure + ;; + esac +} + +# Destroy deployment +destroy() { + log_info "Destroying $CLOUD_PROVIDER deployment..." + + case $CLOUD_PROVIDER in + aws) + local cluster_name="$PROJECT_NAME-$ENVIRONMENT" + local service_name="$PROJECT_NAME-service" + + # Delete ECS service + aws ecs update-service --cluster "$cluster_name" --service "$service_name" --desired-count 0 --region "$REGION" || true + aws ecs delete-service --cluster "$cluster_name" --service "$service_name" --region "$REGION" || true + + # Delete ECS cluster + aws ecs delete-cluster --cluster "$cluster_name" --region "$REGION" || true + ;; + gcp) + local service_name="$PROJECT_NAME-$ENVIRONMENT" + gcloud run services delete "$service_name" --region "$REGION" --quiet || true + ;; + azure) + local resource_group="$PROJECT_NAME-rg" + az group delete --name "$resource_group" --yes --no-wait || true + ;; + esac + + log_success "Deployment destroyed" +} + +# Check deployment status +status() { + log_info "Checking deployment status on $CLOUD_PROVIDER..." + + case $CLOUD_PROVIDER in + aws) + local cluster_name="$PROJECT_NAME-$ENVIRONMENT" + local service_name="$PROJECT_NAME-service" + aws ecs describe-services --cluster "$cluster_name" --services "$service_name" --region "$REGION" + ;; + gcp) + local service_name="$PROJECT_NAME-$ENVIRONMENT" + gcloud run services describe "$service_name" --region "$REGION" + ;; + azure) + local resource_group="$PROJECT_NAME-rg" + local container_name="$PROJECT_NAME-$ENVIRONMENT" + az container show --resource-group "$resource_group" --name "$container_name" + ;; + esac +} + +# View logs +logs() { + log_info "Fetching logs from $CLOUD_PROVIDER..." + + case $CLOUD_PROVIDER in + aws) + local log_group="/ecs/$PROJECT_NAME" + aws logs tail "$log_group" --follow --region "$REGION" + ;; + gcp) + local service_name="$PROJECT_NAME-$ENVIRONMENT" + gcloud logs tail "resource.type=cloud_run_revision AND resource.labels.service_name=$service_name" --location "$REGION" + ;; + azure) + local resource_group="$PROJECT_NAME-rg" + local container_name="$PROJECT_NAME-$ENVIRONMENT" + az container logs --resource-group "$resource_group" --name "$container_name" --follow + ;; + esac +} + +# Scale deployment +scale() { + local replicas=${1:-2} + log_info "Scaling deployment to $replicas replicas on $CLOUD_PROVIDER..." + + case $CLOUD_PROVIDER in + aws) + local cluster_name="$PROJECT_NAME-$ENVIRONMENT" + local service_name="$PROJECT_NAME-service" + aws ecs update-service --cluster "$cluster_name" --service "$service_name" --desired-count "$replicas" --region "$REGION" + ;; + gcp) + local service_name="$PROJECT_NAME-$ENVIRONMENT" + gcloud run services update "$service_name" --region "$REGION" --max-instances "$replicas" + ;; + azure) + log_warning "Azure Container Instances doesn't support auto-scaling. Manual recreation required." + ;; + esac + + log_success "Scaling completed" +} + +# Update deployment +update() { + log_info "Updating deployment with new image tag: $IMAGE_TAG" + deploy +} + +# Rollback deployment +rollback() { + log_info "Rolling back deployment on $CLOUD_PROVIDER..." + + case $CLOUD_PROVIDER in + aws) + local cluster_name="$PROJECT_NAME-$ENVIRONMENT" + local service_name="$PROJECT_NAME-service" + local previous_task_def=$(aws ecs list-task-definitions --family-prefix "$PROJECT_NAME-task" --status ACTIVE --sort DESC --region "$REGION" --query 'taskDefinitionArns[1]' --output text) + + if [[ -n "$previous_task_def" ]]; then + aws ecs update-service --cluster "$cluster_name" --service "$service_name" --task-definition "$previous_task_def" --region "$REGION" + log_success "Rollback completed" + else + log_error "No previous task definition found" + fi + ;; + gcp) + local service_name="$PROJECT_NAME-$ENVIRONMENT" + local previous_revision=$(gcloud run revisions list --service "$service_name" --region "$REGION" --limit 2 --sort-by "~metadata.creationTimestamp" --format "value(metadata.name)" | tail -n 1) + + if [[ -n "$previous_revision" ]]; then + gcloud run services update-traffic "$service_name" --to-revisions "$previous_revision=100" --region "$REGION" + log_success "Rollback completed" + else + log_error "No previous revision found" + fi + ;; + azure) + log_warning "Azure Container Instances doesn't support rollback. Manual deployment required." + ;; + esac +} + +# Main script execution +main() { + parse_args "$@" + + if [[ -z "$COMMAND" ]]; then + log_error "No command specified" + usage + exit 1 + fi + + case $COMMAND in + deploy) + deploy + ;; + destroy) + destroy + ;; + status) + status + ;; + logs) + logs + ;; + scale) + scale "$2" + ;; + update) + update + ;; + rollback) + rollback + ;; + *) + log_error "Unknown command: $COMMAND" + usage + exit 1 + ;; + esac +} + +# Run main function with all arguments +main "$@" \ No newline at end of file diff --git a/scripts/automation/deploy.sh b/scripts/automation/deploy.sh new file mode 100644 index 00000000..afa69e35 --- /dev/null +++ b/scripts/automation/deploy.sh @@ -0,0 +1,316 @@ +#!/bin/bash + +# AI Shopping Concierge - Automated Deployment Script +# Handles deployment to staging and production environments + +set -e + +echo "๐Ÿš€ AI Shopping Concierge - Automated Deployment" +echo "==============================================" + +# Configuration +ENVIRONMENT="${1:-staging}" # staging or production +GITHUB_USERNAME="${2:-ankitap}" +PRODUCT_REPO="ai-shopping-concierge-ap2" +PRODUCT_DIR="../$PRODUCT_REPO" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}โ„น๏ธ $1${NC}" +} + +log_success() { + echo -e "${GREEN}โœ… $1${NC}" +} + +log_warning() { + echo -e "${YELLOW}โš ๏ธ $1${NC}" +} + +log_error() { + echo -e "${RED}โŒ $1${NC}" +} + +# Validate environment +if [[ "$ENVIRONMENT" != "staging" && "$ENVIRONMENT" != "production" ]]; then + log_error "Invalid environment: $ENVIRONMENT" + log_error "Usage: ./deploy.sh [staging|production] [github_username]" + exit 1 +fi + +# Check if product repository exists +if [[ ! -d "$PRODUCT_DIR" ]]; then + log_error "Product repository not found at: $PRODUCT_DIR" + log_error "Please run the repository setup scripts first" + exit 1 +fi + +cd "$PRODUCT_DIR" + +log_info "Deploying AI Shopping Concierge to: $ENVIRONMENT" +log_info "Repository: https://github.com/$GITHUB_USERNAME/$PRODUCT_REPO" + +# Step 1: Pre-deployment checks +log_info "Running pre-deployment checks..." + +# Check if we're on the right branch +CURRENT_BRANCH=$(git branch --show-current) +if [[ "$ENVIRONMENT" == "production" && "$CURRENT_BRANCH" != "main" ]]; then + log_warning "You're on branch '$CURRENT_BRANCH' but deploying to production" + read -p "๐Ÿค” Continue with production deployment? (y/N): " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + log_warning "Deployment cancelled" + exit 0 + fi +fi + +# Check for uncommitted changes +if [[ -n "$(git status --porcelain)" ]]; then + log_error "You have uncommitted changes. Please commit or stash them first." + git status --short + exit 1 +fi + +log_success "Pre-deployment checks passed" + +# Step 2: Run tests +log_info "Running test suite..." + +if [[ -f "requirements.txt" ]]; then + # Install test dependencies if needed + if ! python -c "import pytest" 2>/dev/null; then + log_info "Installing test dependencies..." + pip install pytest pytest-asyncio httpx + fi + + # Run tests + if python -m pytest tests/ -v; then + log_success "All tests passed" + else + log_error "Tests failed! Deployment aborted." + exit 1 + fi +else + log_warning "No requirements.txt found, skipping dependency check" +fi + +# Step 3: Build and validate Docker image +log_info "Building Docker image..." + +if [[ -f "Dockerfile" ]]; then + IMAGE_TAG="ai-shopping-concierge:$ENVIRONMENT-$(git rev-parse --short HEAD)" + + if docker build -t "$IMAGE_TAG" .; then + log_success "Docker image built: $IMAGE_TAG" + else + log_error "Docker build failed!" + exit 1 + fi + + # Quick health check + log_info "Running health check..." + CONTAINER_ID=$(docker run -d -p 8001:8000 "$IMAGE_TAG") + sleep 10 + + if curl -f http://localhost:8001/health >/dev/null 2>&1; then + log_success "Health check passed" + else + log_warning "Health check failed, but continuing..." + fi + + docker stop "$CONTAINER_ID" >/dev/null + docker rm "$CONTAINER_ID" >/dev/null +else + log_warning "No Dockerfile found, skipping Docker build" +fi + +# Step 4: Create deployment tag +DEPLOYMENT_TAG="deploy-$ENVIRONMENT-$(date +%Y%m%d-%H%M%S)" +log_info "Creating deployment tag: $DEPLOYMENT_TAG" + +git tag -a "$DEPLOYMENT_TAG" -m "Deployment to $ENVIRONMENT + +Environment: $ENVIRONMENT +Commit: $(git rev-parse HEAD) +Date: $(date) +Deployer: $(git config user.name)" + +git push origin "$DEPLOYMENT_TAG" +log_success "Deployment tag created and pushed" + +# Step 5: Environment-specific deployment +case "$ENVIRONMENT" in + staging) + deploy_to_staging + ;; + production) + deploy_to_production + ;; +esac + +deploy_to_staging() { + log_info "๐Ÿงช Deploying to STAGING environment..." + + # Deploy using Docker Compose + if [[ -f "docker-compose.staging.yml" ]]; then + docker-compose -f docker-compose.staging.yml down + docker-compose -f docker-compose.staging.yml up -d --build + elif [[ -f "docker-compose.yml" ]]; then + docker-compose down + docker-compose up -d --build + else + log_warning "No Docker Compose file found for staging" + fi + + # Wait for service to be ready + log_info "Waiting for service to be ready..." + sleep 30 + + # Run smoke tests + if command -v curl &> /dev/null; then + if curl -f http://localhost:8000/health; then + log_success "Staging deployment successful!" + log_info "๐ŸŒ Staging URL: http://localhost:8000" + else + log_error "Staging deployment health check failed" + exit 1 + fi + fi +} + +deploy_to_production() { + log_info "๐Ÿš€ Deploying to PRODUCTION environment..." + + # Extra confirmation for production + echo + log_warning "โš ๏ธ PRODUCTION DEPLOYMENT WARNING" + log_warning "This will deploy to production and affect live users!" + echo + read -p "๐Ÿค” Are you sure you want to continue? Type 'YES' to confirm: " CONFIRM + + if [[ "$CONFIRM" != "YES" ]]; then + log_warning "Production deployment cancelled" + exit 0 + fi + + # Deploy using production configuration + if [[ -f "docker-compose.production.yml" ]]; then + docker-compose -f docker-compose.production.yml down + docker-compose -f docker-compose.production.yml up -d --build + elif [[ -f "docker-compose.yml" ]]; then + # Use production environment variables + export ENVIRONMENT=production + docker-compose down + docker-compose up -d --build + else + log_error "No production deployment configuration found!" + exit 1 + fi + + # Wait for service to be ready + log_info "Waiting for production service to be ready..." + sleep 60 + + # Run production health checks + PRODUCTION_URL="https://ai-shopping-concierge.your-domain.com" # Update with your domain + if curl -f "$PRODUCTION_URL/health"; then + log_success "Production deployment successful!" + log_info "๐ŸŒ Production URL: $PRODUCTION_URL" + else + log_error "Production deployment health check failed" + + # Rollback option + read -p "๐Ÿ”„ Do you want to rollback? (y/N): " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + log_info "Rolling back..." + # Implement rollback logic here + log_success "Rollback completed" + fi + exit 1 + fi +} + +# Step 6: Post-deployment tasks +log_info "Running post-deployment tasks..." + +# Update monitoring dashboards +if command -v curl &> /dev/null; then + # Example: Update deployment tracking + curl -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "deployment": { + "environment": "'$ENVIRONMENT'", + "tag": "'$DEPLOYMENT_TAG'", + "timestamp": "'$(date -Iseconds)'", + "deployer": "'$(git config user.name)'" + } + }' \ + "https://your-monitoring-service.com/api/deployments" \ + 2>/dev/null || log_warning "Failed to update monitoring" +fi + +# Generate deployment report +DEPLOYMENT_REPORT="deployment-report-$DEPLOYMENT_TAG.txt" +cat > "$DEPLOYMENT_REPORT" << EOF +AI Shopping Concierge - Deployment Report +Generated: $(date) + +Environment: $ENVIRONMENT +Tag: $DEPLOYMENT_TAG +Commit: $(git rev-parse HEAD) +Deployer: $(git config user.name) + +Pre-deployment Checks: +โœ… Tests passed +โœ… Docker build successful +โœ… Health check passed + +Deployment Steps: +โœ… Tag created and pushed +โœ… Service deployed +โœ… Health check passed + +Post-deployment: +- Monitor application performance +- Check error rates and logs +- Verify all features are working +- Update stakeholders + +Rollback Command: +git checkout $DEPLOYMENT_TAG^ +./scripts/automation/deploy.sh $ENVIRONMENT + +EOF + +log_success "Deployment report generated: $DEPLOYMENT_REPORT" + +echo +log_success "๐ŸŽ‰ Deployment completed successfully!" +echo "===================================" +echo +log_info "๐Ÿ“Š Summary:" +log_info " - Environment: $ENVIRONMENT" +log_info " - Tag: $DEPLOYMENT_TAG" +log_info " - Commit: $(git rev-parse --short HEAD)" +log_info " - Report: $DEPLOYMENT_REPORT" +echo +log_info "๐Ÿ” Next steps:" +log_info " 1. Monitor application performance" +log_info " 2. Check logs for any issues" +log_info " 3. Verify all features are working" +log_info " 4. Update team/stakeholders" + +if [[ "$ENVIRONMENT" == "staging" ]]; then + echo + log_info "๐Ÿš€ Ready for production?" + log_info " ./scripts/automation/deploy.sh production" +fi \ No newline at end of file diff --git a/scripts/automation/maintenance.sh b/scripts/automation/maintenance.sh new file mode 100644 index 00000000..6a32141f --- /dev/null +++ b/scripts/automation/maintenance.sh @@ -0,0 +1,236 @@ +#!/bin/bash + +# AI Shopping Concierge - Automated Maintenance Script +# Handles routine maintenance tasks + +set -e + +echo "๐Ÿ”ง AI Shopping Concierge - Automated Maintenance" +echo "==============================================" + +# Configuration +GITHUB_USERNAME="${1:-ankitap}" +PRODUCT_REPO="ai-shopping-concierge-ap2" +PRODUCT_DIR="../$PRODUCT_REPO" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}โ„น๏ธ $1${NC}" +} + +log_success() { + echo -e "${GREEN}โœ… $1${NC}" +} + +log_warning() { + echo -e "${YELLOW}โš ๏ธ $1${NC}" +} + +log_error() { + echo -e "${RED}โŒ $1${NC}" +} + +# Step 1: Update dependencies +log_info "Updating dependencies..." + +if [[ -f "$PRODUCT_DIR/requirements.txt" ]]; then + cd "$PRODUCT_DIR" + + # Backup current requirements + cp requirements.txt requirements.txt.backup + + # Update Python packages + log_info "Checking for Python package updates..." + pip list --outdated --format=freeze | grep -v '^\-e' | cut -d = -f 1 > outdated_packages.txt + + if [[ -s outdated_packages.txt ]]; then + log_info "Found outdated packages:" + cat outdated_packages.txt + + # Update packages (be careful with major version updates) + pip install --upgrade $(cat outdated_packages.txt | tr '\n' ' ') + pip freeze > requirements.txt + + log_success "Dependencies updated" + else + log_success "All dependencies are up to date" + fi + + rm -f outdated_packages.txt +else + log_warning "No requirements.txt found" +fi + +# Step 2: Clean up Docker resources +log_info "Cleaning up Docker resources..." + +if command -v docker &> /dev/null; then + # Remove unused images + docker image prune -f + + # Remove unused containers + docker container prune -f + + # Remove unused volumes + docker volume prune -f + + log_success "Docker cleanup completed" +else + log_warning "Docker not available" +fi + +# Step 3: Clean up logs +log_info "Cleaning up old logs..." + +if [[ -d "$PRODUCT_DIR/logs" ]]; then + cd "$PRODUCT_DIR/logs" + + # Remove logs older than 30 days + find . -name "*.log" -type f -mtime +30 -delete + + # Compress logs older than 7 days + find . -name "*.log" -type f -mtime +7 -exec gzip {} \; + + log_success "Log cleanup completed" +else + log_warning "No logs directory found" +fi + +# Step 4: Database maintenance +log_info "Running database maintenance..." + +if [[ -f "$PRODUCT_DIR/config/secrets.yaml" ]]; then + # Example database maintenance (adjust for your setup) + log_info "Database maintenance tasks:" + log_info " - Consider running VACUUM/ANALYZE on PostgreSQL" + log_info " - Check Redis memory usage" + log_info " - Review database connection pool settings" + + # You can add actual database maintenance commands here + # psql $DATABASE_URL -c "VACUUM ANALYZE;" + # redis-cli --eval "redis.call('FLUSHDB')" + + log_success "Database maintenance tasks noted" +else + log_warning "No database configuration found" +fi + +# Step 5: Security updates +log_info "Checking for security updates..." + +# Check for CVEs in dependencies +if command -v safety &> /dev/null; then + cd "$PRODUCT_DIR" + safety check --json > security_report.json 2>/dev/null || true + + if [[ -f "security_report.json" ]]; then + VULNERABILITIES=$(cat security_report.json | jq '.vulnerabilities | length' 2>/dev/null || echo "0") + if [[ "$VULNERABILITIES" -gt 0 ]]; then + log_warning "Found $VULNERABILITIES security vulnerabilities!" + log_warning "Run: safety check --full-report" + else + log_success "No security vulnerabilities found" + fi + rm -f security_report.json + fi +else + log_warning "Safety not installed. Run: pip install safety" +fi + +# Step 6: Performance monitoring +log_info "Checking performance metrics..." + +if command -v docker &> /dev/null && docker ps | grep -q ai-shopping-concierge; then + # Get container stats + CONTAINER_ID=$(docker ps | grep ai-shopping-concierge | awk '{print $1}') + if [[ -n "$CONTAINER_ID" ]]; then + log_info "Container resource usage:" + docker stats --no-stream "$CONTAINER_ID" + fi + + log_success "Performance metrics collected" +else + log_warning "AI Shopping Concierge container not running" +fi + +# Step 7: Backup important data +log_info "Creating backups..." + +BACKUP_DIR="$PRODUCT_DIR/backups/$(date +%Y%m%d)" +mkdir -p "$BACKUP_DIR" + +# Backup configuration +if [[ -d "$PRODUCT_DIR/config" ]]; then + cp -r "$PRODUCT_DIR/config" "$BACKUP_DIR/" +fi + +# Backup database (example - adjust for your setup) +if [[ -f "$PRODUCT_DIR/config/secrets.yaml" ]]; then + # Example backup commands (uncomment and adjust as needed): + # pg_dump $DATABASE_URL > "$BACKUP_DIR/database_backup.sql" + # redis-cli --rdb "$BACKUP_DIR/redis_backup.rdb" + + log_info "Database backup commands prepared (review and uncomment in script)" +fi + +log_success "Backup directory created: $BACKUP_DIR" + +# Step 8: Generate maintenance report +MAINTENANCE_REPORT="$PRODUCT_DIR/maintenance-report-$(date +%Y%m%d-%H%M%S).txt" + +cat > "$MAINTENANCE_REPORT" << EOF +AI Shopping Concierge - Maintenance Report +Generated: $(date) + +Maintenance Tasks Completed: +โœ… Dependencies updated +โœ… Docker resources cleaned +โœ… Old logs cleaned up +โœ… Database maintenance reviewed +โœ… Security check performed +โœ… Performance metrics collected +โœ… Backup directory created + +System Status: +- Python packages: $(pip list | wc -l) installed +- Docker images: $(docker images | grep ai-shopping-concierge | wc -l) AI Shopping Concierge images +- Log files: $(find "$PRODUCT_DIR/logs" -name "*.log" 2>/dev/null | wc -l) active log files +- Backup location: $BACKUP_DIR + +Recommendations: +1. Review security report if vulnerabilities were found +2. Monitor application performance after updates +3. Consider scheduling regular maintenance (weekly/monthly) +4. Update documentation if dependencies changed significantly + +Next Maintenance: $(date -d '+7 days') + +EOF + +log_success "Maintenance report generated: $MAINTENANCE_REPORT" + +echo +log_success "๐ŸŽ‰ Maintenance completed successfully!" +echo "====================================" +echo +log_info "๐Ÿ“Š Summary:" +log_info " - Dependencies updated" +log_info " - System cleaned up" +log_info " - Backups created" +log_info " - Report: $MAINTENANCE_REPORT" +echo +log_info "๐Ÿ” Next steps:" +log_info " 1. Review the maintenance report" +log_info " 2. Test the application after updates" +log_info " 3. Schedule next maintenance in 1 week" +log_info " 4. Monitor for any issues" + +# Optional: Schedule next maintenance +log_info "๐Ÿ’ก Tip: Add this to your crontab for weekly maintenance:" +log_info "0 2 * * 0 cd $(pwd) && ./scripts/automation/maintenance.sh >> maintenance.log 2>&1" \ No newline at end of file diff --git a/scripts/automation/sync-ankita-fork.ps1 b/scripts/automation/sync-ankita-fork.ps1 new file mode 100644 index 00000000..a759e7a7 --- /dev/null +++ b/scripts/automation/sync-ankita-fork.ps1 @@ -0,0 +1,337 @@ +# Sync AnkitaParakh/AP2-shopping-concierge fork with upstream AP2 +# PowerShell version for Windows users + +param( + [switch]$Force, + [string]$Branch = "", + [switch]$Help +) + +# Configuration +$GITHUB_USERNAME = "AnkitaParakh" +$PRODUCT_REPO = "AP2-shopping-concierge" +$UPSTREAM_REPO = "https://github.com/google-agentic-commerce/AP2.git" +$FORK_REPO = "https://github.com/$GITHUB_USERNAME/$PRODUCT_REPO.git" + +# Color output functions +function Write-Info { + param($Message) + Write-Host "[INFO] $Message" -ForegroundColor Blue +} + +function Write-Success { + param($Message) + Write-Host "[SUCCESS] $Message" -ForegroundColor Green +} + +function Write-Warning { + param($Message) + Write-Host "[WARNING] $Message" -ForegroundColor Yellow +} + +function Write-Error { + param($Message) + Write-Host "[ERROR] $Message" -ForegroundColor Red +} + +# Help function +function Show-Help { + Write-Host @" +Sync AnkitaParakh/AP2-shopping-concierge fork with upstream AP2 + +Usage: .\sync-ankita-fork.ps1 [OPTIONS] + +Options: + -Force Force sync even if working directory is not clean + -Branch Sync only specific branch + -Help Show this help message + +Examples: + .\sync-ankita-fork.ps1 # Sync all branches + .\sync-ankita-fork.ps1 -Branch main # Sync only main branch + .\sync-ankita-fork.ps1 -Force # Force sync with uncommitted changes + +"@ +} + +# Check if we're in the right directory +function Test-GitRepository { + if (-not (Test-Path "pyproject.toml") -or -not (Test-Path ".git")) { + Write-Error "Not in AP2 repository root. Please run from AP2 directory." + exit 1 + } +} + +# Check git status +function Test-GitStatus { + $status = git status --porcelain 2>$null + if ($status -and -not $Force) { + Write-Error "Working directory is not clean. Please commit or stash your changes, or use -Force." + git status --short + exit 1 + } +} + +# Sync a specific branch +function Sync-Branch { + param( + [string]$BranchName, + [bool]$CreateIfMissing = $false + ) + + Write-Info "Syncing branch: $BranchName" + + # Check if branch exists locally + $branchExists = git show-ref --verify --quiet "refs/heads/$BranchName" 2>$null + $LASTEXITCODE = 0 # Reset exit code + + if ($branchExists) { + Write-Info "Switching to existing branch: $BranchName" + git checkout $BranchName + if ($LASTEXITCODE -ne 0) { + Write-Error "Failed to checkout branch $BranchName" + return $false + } + } elseif ($CreateIfMissing) { + Write-Info "Creating new branch: $BranchName" + git checkout -b $BranchName + if ($LASTEXITCODE -ne 0) { + Write-Error "Failed to create branch $BranchName" + return $false + } + } else { + Write-Warning "Branch $BranchName doesn't exist locally. Skipping..." + return $true + } + + # Fetch latest changes from upstream + Write-Info "Fetching from upstream..." + git fetch upstream $BranchName + if ($LASTEXITCODE -ne 0) { + Write-Warning "Failed to fetch from upstream. Continuing anyway..." + } + + # Check if upstream branch exists + $upstreamExists = git show-ref --verify --quiet "refs/remotes/upstream/$BranchName" 2>$null + $LASTEXITCODE = 0 # Reset exit code + + if (-not $upstreamExists) { + Write-Warning "Upstream branch $BranchName doesn't exist. Skipping merge..." + return $true + } + + # Merge upstream changes + Write-Info "Merging upstream/$BranchName into $BranchName" + git merge "upstream/$BranchName" --no-edit + if ($LASTEXITCODE -ne 0) { + Write-Error "Merge conflicts detected. Please resolve manually." + Write-Info "After resolving conflicts, run:" + Write-Info " git add ." + Write-Info " git commit" + Write-Info " git push origin $BranchName" + return $false + } + + Write-Success "Successfully merged upstream changes" + + # Push to fork + Write-Info "Pushing to fork..." + git push origin $BranchName + if ($LASTEXITCODE -ne 0) { + Write-Warning "Failed to push to fork. You may need to force push or resolve conflicts." + return $false + } + + Write-Success "Successfully pushed to fork" + return $true +} + +# Check for new upstream branches +function Test-NewBranches { + Write-Info "Checking for new upstream branches..." + + # Fetch all upstream branches + git fetch upstream + + # Get list of upstream branches + $upstreamBranches = git branch -r | Where-Object { $_ -match 'upstream/' } | ForEach-Object { $_.Replace('upstream/', '').Trim() } | Where-Object { $_ -ne 'HEAD' } + $localBranches = git branch | ForEach-Object { $_.Replace('*', '').Trim() } + + foreach ($branch in $upstreamBranches) { + if ($branch -notin $localBranches) { + Write-Info "New upstream branch found: $branch" + $response = Read-Host "Do you want to create and sync this branch? (y/n)" + if ($response -eq 'y' -or $response -eq 'Y') { + Sync-Branch $branch $true + } + } + } +} + +# Show sync summary +function Show-Summary { + Write-Host "" + Write-Host "Sync Summary" -ForegroundColor Cyan + Write-Host "===============" -ForegroundColor Cyan + + # Show last commit from upstream + Write-Info "Latest upstream commit:" + $upstreamCommit = git log upstream/main --oneline -1 2>$null + if (-not $upstreamCommit) { + $upstreamCommit = git log upstream/master --oneline -1 2>$null + } + if ($upstreamCommit) { + Write-Host " $upstreamCommit" + } else { + Write-Host " No upstream commits found" + } + + # Show current branch status + Write-Host "" + Write-Info "Current branch status:" + $currentBranch = git branch --show-current + + $behindCount = 0 + $aheadCount = 0 + + try { + $behindCount = git rev-list --count "HEAD..upstream/$currentBranch" 2>$null + $aheadCount = git rev-list --count "upstream/$currentBranch..HEAD" 2>$null + } catch { + # Ignore errors + } + + if ($behindCount -eq 0 -and $aheadCount -eq 0) { + Write-Success "Your fork is up-to-date with upstream" + } elseif ($behindCount -gt 0) { + Write-Warning "Your fork is $behindCount commits behind upstream" + } elseif ($aheadCount -gt 0) { + Write-Info "Your fork is $aheadCount commits ahead of upstream" + } + + Write-Host "" + Write-Info "Repository URLs:" + Write-Info " Upstream: https://github.com/google-agentic-commerce/AP2" + Write-Info " Your Fork: https://github.com/$GITHUB_USERNAME/$PRODUCT_REPO" + + Write-Host "" + Write-Info "Next steps:" + Write-Info " - Review changes: git log --oneline upstream/main..HEAD" + Write-Info " - Check status: git status" + Write-Info " - View differences: git diff upstream/main" +} + +# Main execution +function Main { + if ($Help) { + Show-Help + return + } + + Write-Host "Starting AP2 fork sync process..." -ForegroundColor Cyan + Write-Host "" + + # Display current status + Write-Host "AP2 Fork Sync Status" -ForegroundColor Cyan + Write-Host "=======================" -ForegroundColor Cyan + Write-Host "Upstream: $UPSTREAM_REPO" + Write-Host "Fork: $FORK_REPO" + Write-Host "Current directory: $(Get-Location)" + $currentBranch = git branch --show-current 2>$null + Write-Host "Current branch: $currentBranch" + Write-Host "" + + # Check if we're in the right directory + Test-GitRepository + + # Verify git remotes are set up correctly + Write-Info "Verifying git remotes..." + + $upstreamUrl = git remote get-url upstream 2>$null + $originUrl = git remote get-url origin 2>$null + + if (-not $upstreamUrl) { + Write-Info "Upstream remote not configured. Setting up..." + git remote add upstream $UPSTREAM_REPO + } elseif ($upstreamUrl -ne $UPSTREAM_REPO) { + Write-Warning "Upstream URL mismatch. Updating..." + git remote set-url upstream $UPSTREAM_REPO + } + + if (-not $originUrl) { + Write-Info "Origin remote not configured. Setting up..." + git remote add origin $FORK_REPO + } elseif ($originUrl -ne $FORK_REPO) { + Write-Warning "Origin URL mismatch. Updating..." + git remote set-url origin $FORK_REPO + } + + Write-Success "Git remotes configured correctly" + + # Check working directory status + Test-GitStatus + + # Store current branch + $originalBranch = git branch --show-current + + # If specific branch requested, sync only that branch + if ($Branch) { + $success = Sync-Branch $Branch $true + if (-not $success) { + exit 1 + } + } else { + # Sync main/master branch + $mainExists = git show-ref --verify --quiet "refs/remotes/upstream/main" 2>$null + $masterExists = git show-ref --verify --quiet "refs/remotes/upstream/master" 2>$null + + if ($mainExists) { + $success = Sync-Branch "main" $true + } elseif ($masterExists) { + $success = Sync-Branch "master" $true + } else { + Write-Error "No main or master branch found in upstream" + exit 1 + } + + if (-not $success) { + exit 1 + } + + # Check for and sync other important branches + $importantBranches = @("develop", "development", "staging", "production") + foreach ($branch in $importantBranches) { + $branchExists = git show-ref --verify --quiet "refs/remotes/upstream/$branch" 2>$null + $LASTEXITCODE = 0 # Reset exit code + + if ($branchExists) { + Write-Info "Found upstream branch: $branch" + $response = Read-Host "Sync $branch branch? (y/n)" + if ($response -eq 'y' -or $response -eq 'Y') { + Sync-Branch $branch $true + } + } + } + + # Check for new branches + Test-NewBranches + } + + # Return to original branch if it still exists + if ($originalBranch) { + $branchExists = git show-ref --verify --quiet "refs/heads/$originalBranch" 2>$null + $LASTEXITCODE = 0 # Reset exit code + + if ($branchExists) { + git checkout $originalBranch + } + } + + # Show summary + Show-Summary + + Write-Success "Fork sync completed successfully!" +} + +# Run main function +Main \ No newline at end of file diff --git a/scripts/automation/sync-ankita-fork.sh b/scripts/automation/sync-ankita-fork.sh new file mode 100644 index 00000000..71adc2b3 --- /dev/null +++ b/scripts/automation/sync-ankita-fork.sh @@ -0,0 +1,242 @@ +#!/bin/bash + +# Sync AnkitaParakh/AP2-shopping-concierge fork with upstream AP2 +# This script keeps your fork up-to-date with the latest changes from Google's AP2 repo + +set -e + +# Configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_DIR="$(dirname "$(dirname "$SCRIPT_DIR")")" +GITHUB_USERNAME="AnkitaParakh" +PRODUCT_REPO="AP2-shopping-concierge" +UPSTREAM_REPO="https://github.com/google-agentic-commerce/AP2.git" +FORK_REPO="https://github.com/$GITHUB_USERNAME/$PRODUCT_REPO.git" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { echo -e "${BLUE}[INFO]${NC} $1"; } +log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; } +log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +# Check if we're in the right directory +if [[ ! -f "$PROJECT_DIR/pyproject.toml" ]] || [[ ! -d "$PROJECT_DIR/.git" ]]; then + log_error "Not in AP2 repository root. Please run from AP2 directory." + exit 1 +fi + +cd "$PROJECT_DIR" + +# Display current status +echo "๐Ÿ”„ AP2 Fork Sync Status" +echo "=======================" +echo "Upstream: $UPSTREAM_REPO" +echo "Fork: $FORK_REPO" +echo "Current directory: $(pwd)" +echo "Current branch: $(git branch --show-current)" +echo "" + +# Function to check git status +check_git_status() { + if [[ -n "$(git status --porcelain)" ]]; then + log_error "Working directory is not clean. Please commit or stash your changes." + git status --short + exit 1 + fi +} + +# Function to sync a specific branch +sync_branch() { + local branch_name="$1" + local create_if_missing="${2:-false}" + + log_info "Syncing branch: $branch_name" + + # Check if branch exists locally + if git show-ref --verify --quiet "refs/heads/$branch_name"; then + log_info "Switching to existing branch: $branch_name" + git checkout "$branch_name" + elif [[ "$create_if_missing" == "true" ]]; then + log_info "Creating new branch: $branch_name" + git checkout -b "$branch_name" + else + log_warning "Branch $branch_name doesn't exist locally. Skipping..." + return 0 + fi + + # Fetch latest changes from upstream + log_info "Fetching from upstream..." + git fetch upstream "$branch_name" + + # Check if upstream branch exists + if ! git show-ref --verify --quiet "refs/remotes/upstream/$branch_name"; then + log_warning "Upstream branch $branch_name doesn't exist. Skipping merge..." + return 0 + fi + + # Merge upstream changes + log_info "Merging upstream/$branch_name into $branch_name" + if git merge "upstream/$branch_name" --no-edit; then + log_success "Successfully merged upstream changes" + + # Push to fork + log_info "Pushing to fork..." + if git push origin "$branch_name"; then + log_success "Successfully pushed to fork" + else + log_warning "Failed to push to fork. You may need to force push or resolve conflicts." + fi + else + log_error "Merge conflicts detected. Please resolve manually." + log_info "After resolving conflicts, run:" + log_info " git add ." + log_info " git commit" + log_info " git push origin $branch_name" + exit 1 + fi +} + +# Function to check for new upstream branches +check_new_branches() { + log_info "Checking for new upstream branches..." + + # Fetch all upstream branches + git fetch upstream + + # Get list of upstream branches + local upstream_branches=$(git branch -r | grep 'upstream/' | sed 's/upstream\///' | grep -v 'HEAD' | tr -d ' ') + local local_branches=$(git branch | sed 's/[* ]//g') + + for branch in $upstream_branches; do + if ! echo "$local_branches" | grep -q "^$branch$"; then + log_info "New upstream branch found: $branch" + read -p "Do you want to create and sync this branch? (y/n): " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + sync_branch "$branch" true + fi + fi + done +} + +# Function to show sync summary +show_summary() { + echo "" + echo "๐Ÿ“Š Sync Summary" + echo "===============" + + # Show last commit from upstream + log_info "Latest upstream commit:" + git log upstream/main --oneline -1 2>/dev/null || git log upstream/master --oneline -1 2>/dev/null || echo " No upstream commits found" + + # Show current branch status + echo "" + log_info "Current branch status:" + local current_branch=$(git branch --show-current) + local behind_count=$(git rev-list --count HEAD..upstream/$current_branch 2>/dev/null || echo "0") + local ahead_count=$(git rev-list --count upstream/$current_branch..HEAD 2>/dev/null || echo "0") + + if [[ "$behind_count" -eq 0 && "$ahead_count" -eq 0 ]]; then + log_success "Your fork is up-to-date with upstream" + elif [[ "$behind_count" -gt 0 ]]; then + log_warning "Your fork is $behind_count commits behind upstream" + elif [[ "$ahead_count" -gt 0 ]]; then + log_info "Your fork is $ahead_count commits ahead of upstream" + fi + + echo "" + log_info "Repository URLs:" + log_info " Upstream: https://github.com/google-agentic-commerce/AP2" + log_info " Your Fork: https://github.com/$GITHUB_USERNAME/$PRODUCT_REPO" + + echo "" + log_info "Next steps:" + log_info " - Review changes: git log --oneline upstream/main..HEAD" + log_info " - Check status: git status" + log_info " - View differences: git diff upstream/main" +} + +# Main execution +main() { + echo "๐Ÿš€ Starting AP2 fork sync process..." + echo "" + + # Verify git remotes are set up correctly + log_info "Verifying git remotes..." + if ! git remote get-url upstream &>/dev/null; then + log_error "Upstream remote not configured. Setting up..." + git remote add upstream "$UPSTREAM_REPO" + fi + + if ! git remote get-url origin &>/dev/null; then + log_error "Origin remote not configured. Setting up..." + git remote add origin "$FORK_REPO" + fi + + # Verify remotes point to correct repositories + local upstream_url=$(git remote get-url upstream) + local origin_url=$(git remote get-url origin) + + if [[ "$upstream_url" != "$UPSTREAM_REPO" ]]; then + log_warning "Upstream URL mismatch. Updating..." + git remote set-url upstream "$UPSTREAM_REPO" + fi + + if [[ "$origin_url" != "$FORK_REPO" ]]; then + log_warning "Origin URL mismatch. Updating..." + git remote set-url origin "$FORK_REPO" + fi + + log_success "Git remotes configured correctly" + + # Check working directory status + check_git_status + + # Store current branch + local original_branch=$(git branch --show-current) + + # Sync main/master branch + if git show-ref --verify --quiet "refs/remotes/upstream/main"; then + sync_branch "main" true + elif git show-ref --verify --quiet "refs/remotes/upstream/master"; then + sync_branch "master" true + else + log_error "No main or master branch found in upstream" + exit 1 + fi + + # Check for and sync other important branches + for branch in develop development staging production; do + if git show-ref --verify --quiet "refs/remotes/upstream/$branch"; then + log_info "Found upstream branch: $branch" + read -p "Sync $branch branch? (y/n): " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + sync_branch "$branch" true + fi + fi + done + + # Check for new branches + check_new_branches + + # Return to original branch if it still exists + if [[ -n "$original_branch" ]] && git show-ref --verify --quiet "refs/heads/$original_branch"; then + git checkout "$original_branch" + fi + + # Show summary + show_summary + + log_success "Fork sync completed successfully! ๐ŸŽ‰" +} + +# Run main function +main "$@" \ No newline at end of file diff --git a/scripts/automation/sync-upstream.sh b/scripts/automation/sync-upstream.sh new file mode 100644 index 00000000..6aca3af0 --- /dev/null +++ b/scripts/automation/sync-upstream.sh @@ -0,0 +1,217 @@ +#!/bin/bash + +# AI Shopping Concierge - Upstream Sync Script +# Automatically syncs changes from Google's AP2 repository + +set -e + +echo "๐Ÿ”„ AI Shopping Concierge - Upstream Sync" +echo "=======================================" + +# Configuration +GITHUB_USERNAME="${1:-ankitap}" +PRODUCT_REPO="ai-shopping-concierge-ap2" +CURRENT_DIR="$(pwd)" +PRODUCT_DIR="../$PRODUCT_REPO" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}โ„น๏ธ $1${NC}" +} + +log_success() { + echo -e "${GREEN}โœ… $1${NC}" +} + +log_warning() { + echo -e "${YELLOW}โš ๏ธ $1${NC}" +} + +log_error() { + echo -e "${RED}โŒ $1${NC}" +} + +# Verify we're in the AP2 directory +if [[ ! -f "pyproject.toml" ]] || [[ ! -d ".git" ]]; then + log_error "Please run this script from the AP2 repository root directory" + exit 1 +fi + +log_success "Running from AP2 repository" + +# Step 1: Fetch latest changes from upstream +log_info "Fetching latest changes from upstream Google AP2 repository..." +git fetch upstream + +# Check if there are new changes +UPSTREAM_COMMITS=$(git rev-list HEAD..upstream/main --count) +if [[ "$UPSTREAM_COMMITS" -eq 0 ]]; then + log_success "Already up to date with upstream" +else + log_info "Found $UPSTREAM_COMMITS new commits from upstream" +fi + +# Step 2: Show what changed +if [[ "$UPSTREAM_COMMITS" -gt 0 ]]; then + echo + log_info "Recent changes from Google AP2:" + echo "================================" + git log --oneline --graph HEAD..upstream/main | head -10 + echo + + # Ask for confirmation + read -p "๐Ÿค” Do you want to merge these changes? (y/N): " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + log_warning "Sync cancelled by user" + exit 0 + fi +fi + +# Step 3: Merge upstream changes +if [[ "$UPSTREAM_COMMITS" -gt 0 ]]; then + log_info "Merging upstream changes..." + + # Switch to main branch + git checkout main + + # Merge upstream changes + if git merge upstream/main --no-edit; then + log_success "Successfully merged upstream changes" + else + log_error "Merge conflicts detected! Please resolve manually and run:" + log_error " git add ." + log_error " git commit" + log_error " ./scripts/automation/sync-upstream.sh" + exit 1 + fi + + # Push to your fork + log_info "Pushing changes to your fork..." + git push origin main + log_success "Fork updated successfully" +fi + +# Step 4: Update product repository submodule +if [[ -d "$PRODUCT_DIR" ]]; then + log_info "Updating product repository submodule..." + cd "$PRODUCT_DIR" + + # Update the AP2 submodule + if [[ -d "ap2-core" ]]; then + git submodule update --remote ap2-core + + # Check if submodule has changes + if [[ -n "$(git status --porcelain)" ]]; then + log_info "AP2 submodule has updates, committing..." + git add ap2-core + git commit -m "Update AP2 core to latest version + +$(git -C ap2-core log --oneline HEAD~$UPSTREAM_COMMITS..HEAD)" + + git push origin main + log_success "Product repository updated with latest AP2 core" + else + log_success "Product repository submodule already up to date" + fi + else + log_warning "AP2 submodule not found in product repository" + fi + + cd "$CURRENT_DIR" +else + log_warning "Product repository not found at: $PRODUCT_DIR" +fi + +# Step 5: Check for compatibility issues +log_info "Checking for compatibility issues..." + +# List of critical files to monitor for breaking changes +CRITICAL_FILES=( + "src/ap2/types/__init__.py" + "src/ap2/types/payment_request.py" + "src/ap2/types/mandate.py" + "src/ap2/types/contact_picker.py" +) + +BREAKING_CHANGES=false +for file in "${CRITICAL_FILES[@]}"; do + if git diff HEAD~$UPSTREAM_COMMITS..HEAD --quiet -- "$file" 2>/dev/null; then + continue + else + if [[ -f "$file" ]]; then + log_warning "Breaking change detected in: $file" + BREAKING_CHANGES=true + fi + fi +done + +if [[ "$BREAKING_CHANGES" == "true" ]]; then + echo + log_warning "โš ๏ธ BREAKING CHANGES DETECTED!" + log_warning "The following actions are recommended:" + log_warning "1. Review changes in critical AP2 files" + log_warning "2. Update your AI Shopping Concierge code accordingly" + log_warning "3. Run tests to ensure compatibility" + log_warning "4. Update documentation if needed" + echo + + if [[ -d "$PRODUCT_DIR" ]]; then + log_info "๐Ÿ’ก Quick compatibility check:" + log_info "cd $PRODUCT_DIR && python -m pytest tests/" + fi +else + log_success "No breaking changes detected" +fi + +# Step 6: Generate sync report +SYNC_REPORT="sync-report-$(date +%Y%m%d-%H%M%S).txt" +cat > "$SYNC_REPORT" << EOF +AI Shopping Concierge - Upstream Sync Report +Generated: $(date) + +Upstream Commits Merged: $UPSTREAM_COMMITS +Breaking Changes: $BREAKING_CHANGES + +Recent Changes: +$(git log --oneline HEAD~$UPSTREAM_COMMITS..HEAD) + +Critical Files Checked: +$(printf '%s\n' "${CRITICAL_FILES[@]}") + +Recommendations: +- Review merged changes for compatibility +- Run full test suite: python -m pytest tests/ +- Update documentation if AP2 APIs changed +- Deploy to staging for testing before production + +EOF + +log_success "Sync report generated: $SYNC_REPORT" + +echo +log_success "๐ŸŽ‰ Upstream sync completed successfully!" +echo "==================================" +echo +if [[ "$UPSTREAM_COMMITS" -gt 0 ]]; then + log_info "๐Ÿ“Š Summary:" + log_info " - Merged $UPSTREAM_COMMITS commits from Google AP2" + log_info " - Updated your fork: https://github.com/$GITHUB_USERNAME/AP2" + if [[ -d "$PRODUCT_DIR" ]]; then + log_info " - Updated product repository submodule" + fi + log_info " - Generated sync report: $SYNC_REPORT" + echo + log_info "๐Ÿ” Next steps:" + log_info " 1. Review the sync report" + log_info " 2. Test your AI Shopping Concierge" + log_info " 3. Deploy updates if everything looks good" +else + log_info "๐Ÿš€ Everything is up to date!" +fi \ No newline at end of file diff --git a/scripts/repository-setup/1-fork-and-setup.bat b/scripts/repository-setup/1-fork-and-setup.bat new file mode 100644 index 00000000..09bf3a2e --- /dev/null +++ b/scripts/repository-setup/1-fork-and-setup.bat @@ -0,0 +1,96 @@ +@echo off +REM AI Shopping Concierge - Repository Setup Script (Windows) +REM This script helps you fork the AP2 repo and set up the development environment + +echo ๐Ÿš€ AI Shopping Concierge - Repository Setup (Windows) +echo ======================================================= + +REM Configuration +set "UPSTREAM_REPO=https://github.com/google-agentic-commerce/AP2.git" +set "GITHUB_USERNAME=%1" +if "%GITHUB_USERNAME%"=="" set "GITHUB_USERNAME=ankitap" +set "PRODUCT_REPO=ai-shopping-concierge-ap2" +set "FORK_REPO=https://github.com/%GITHUB_USERNAME%/AP2.git" +set "PRODUCT_REPO_URL=https://github.com/%GITHUB_USERNAME%/%PRODUCT_REPO%.git" + +echo ๐Ÿ“‹ Configuration: +echo Upstream: %UPSTREAM_REPO% +echo Your Fork: %FORK_REPO% +echo Product Repo: %PRODUCT_REPO_URL% +echo. + +REM Step 1: Verify git is installed +git --version >nul 2>&1 +if %errorlevel% neq 0 ( + echo โŒ Git is not installed. Please install git first. + echo Download from: https://git-scm.com/download/windows + pause + exit /b 1 +) + +echo โœ… Git is available + +REM Step 2: Check if we're in the AP2 directory +if not exist "pyproject.toml" ( + echo โŒ Please run this script from the AP2 repository root directory + pause + exit /b 1 +) + +if not exist ".git" ( + echo โŒ This doesn't appear to be a git repository + pause + exit /b 1 +) + +echo โœ… Running from AP2 repository + +REM Step 3: Set up remotes for the forked AP2 repo +echo ๐Ÿ”ง Setting up git remotes... + +REM Add upstream remote (Google's original repo) +git remote get-url upstream >nul 2>&1 +if %errorlevel% equ 0 ( + echo โš ๏ธ Upstream remote already exists, updating... + git remote set-url upstream "%UPSTREAM_REPO%" +) else ( + echo โž• Adding upstream remote... + git remote add upstream "%UPSTREAM_REPO%" +) + +REM Update origin to point to your fork +echo ๐Ÿ”„ Updating origin to your fork... +git remote set-url origin "%FORK_REPO%" + +REM Verify remotes +echo โœ… Git remotes configured: +git remote -v + +echo. +echo ๐Ÿ“ MANUAL STEPS REQUIRED: +echo ========================= +echo. +echo 1. ๐Ÿด Fork the AP2 repository: +echo - Go to: https://github.com/google-agentic-commerce/AP2 +echo - Click 'Fork' button +echo - Choose your GitHub account (%GITHUB_USERNAME%) +echo. +echo 2. ๐Ÿ†• Create your product repository: +echo - Go to: https://github.com/new +echo - Repository name: %PRODUCT_REPO% +echo - Description: 'AI Shopping Concierge built on AP2 Protocol' +echo - Make it Public +echo - Add README, .gitignore (Python), and LICENSE +echo. +echo 3. ๐Ÿ”‘ Set up authentication: +echo - Configure SSH keys: https://docs.github.com/en/authentication/connecting-to-github-with-ssh +echo - Or use Personal Access Token: https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token +echo. +echo 4. โ–ถ๏ธ Run the next script: +echo scripts\repository-setup\2-sync-and-verify.bat +echo. + +echo ๐Ÿ’ก TIP: Make sure to replace '%GITHUB_USERNAME%' with your actual GitHub username! +echo. +echo Press any key to continue... +pause >nul \ No newline at end of file diff --git a/scripts/repository-setup/1-fork-and-setup.sh b/scripts/repository-setup/1-fork-and-setup.sh new file mode 100644 index 00000000..475c2da5 --- /dev/null +++ b/scripts/repository-setup/1-fork-and-setup.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +# AI Shopping Concierge - Repository Setup Script +# This script helps you fork the AP2 repo and set up the development environment + +set -e # Exit on any error + +echo "๐Ÿš€ AI Shopping Concierge - Repository Setup" +echo "==========================================" + +# Configuration +UPSTREAM_REPO="https://github.com/google-agentic-commerce/AP2.git" +GITHUB_USERNAME="${1:-ankitap}" # Replace with your GitHub username +PRODUCT_REPO="ai-shopping-concierge-ap2" +FORK_REPO="https://github.com/$GITHUB_USERNAME/AP2.git" +PRODUCT_REPO_URL="https://github.com/$GITHUB_USERNAME/$PRODUCT_REPO.git" + +echo "๐Ÿ“‹ Configuration:" +echo " Upstream: $UPSTREAM_REPO" +echo " Your Fork: $FORK_REPO" +echo " Product Repo: $PRODUCT_REPO_URL" +echo + +# Step 1: Verify git is installed +if ! command -v git &> /dev/null; then + echo "โŒ Git is not installed. Please install git first." + exit 1 +fi + +echo "โœ… Git is available" + +# Step 2: Check if we're in the AP2 directory +if [[ ! -f "pyproject.toml" ]] || [[ ! -d ".git" ]]; then + echo "โŒ Please run this script from the AP2 repository root directory" + exit 1 +fi + +echo "โœ… Running from AP2 repository" + +# Step 3: Set up remotes for the forked AP2 repo +echo "๐Ÿ”ง Setting up git remotes..." + +# Add upstream remote (Google's original repo) +if git remote get-url upstream &> /dev/null; then + echo " โš ๏ธ Upstream remote already exists, updating..." + git remote set-url upstream "$UPSTREAM_REPO" +else + echo " โž• Adding upstream remote..." + git remote add upstream "$UPSTREAM_REPO" +fi + +# Update origin to point to your fork +echo " ๐Ÿ”„ Updating origin to your fork..." +git remote set-url origin "$FORK_REPO" + +# Verify remotes +echo "โœ… Git remotes configured:" +git remote -v + +echo +echo "๐Ÿ“ MANUAL STEPS REQUIRED:" +echo "========================" +echo +echo "1. ๐Ÿด Fork the AP2 repository:" +echo " - Go to: https://github.com/google-agentic-commerce/AP2" +echo " - Click 'Fork' button" +echo " - Choose your GitHub account ($GITHUB_USERNAME)" +echo +echo "2. ๐Ÿ†• Create your product repository:" +echo " - Go to: https://github.com/new" +echo " - Repository name: $PRODUCT_REPO" +echo " - Description: 'AI Shopping Concierge built on AP2 Protocol'" +echo " - Make it Public" +echo " - Add README, .gitignore (Python), and LICENSE" +echo +echo "3. ๐Ÿ”‘ Set up authentication:" +echo " - Configure SSH keys: https://docs.github.com/en/authentication/connecting-to-github-with-ssh" +echo " - Or use Personal Access Token: https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token" +echo +echo "4. โ–ถ๏ธ Run the next script:" +echo " ./scripts/repository-setup/2-sync-and-verify.sh" +echo + +echo "๐Ÿ’ก TIP: Make sure to replace '$GITHUB_USERNAME' with your actual GitHub username!" \ No newline at end of file diff --git a/scripts/repository-setup/2-sync-and-verify.sh b/scripts/repository-setup/2-sync-and-verify.sh new file mode 100644 index 00000000..33f29bc2 --- /dev/null +++ b/scripts/repository-setup/2-sync-and-verify.sh @@ -0,0 +1,199 @@ +#!/bin/bash + +# AI Shopping Concierge - Sync and Verify Script +# Run this after completing the manual GitHub steps + +set -e + +echo "๐Ÿ”„ AI Shopping Concierge - Sync and Verify" +echo "==========================================" + +# Configuration +GITHUB_USERNAME="${1:-ankitap}" +PRODUCT_REPO="ai-shopping-concierge-ap2" + +echo "๐Ÿ“‹ Verifying setup for: $GITHUB_USERNAME" +echo + +# Step 1: Fetch from upstream +echo "๐Ÿ”„ Fetching latest changes from upstream..." +git fetch upstream + +# Step 2: Create and switch to development branch +echo "๐ŸŒฟ Creating development branch..." +if git show-ref --verify --quiet refs/heads/ai-shopping-concierge; then + echo " โš ๏ธ Branch 'ai-shopping-concierge' already exists, switching..." + git checkout ai-shopping-concierge +else + echo " โž• Creating new branch 'ai-shopping-concierge'..." + git checkout -b ai-shopping-concierge upstream/main +fi + +# Step 3: Test connection to your fork +echo "๐Ÿงช Testing connection to your fork..." +if git ls-remote origin &> /dev/null; then + echo "โœ… Successfully connected to your fork" +else + echo "โŒ Cannot connect to your fork. Please check:" + echo " - Your fork exists at: https://github.com/$GITHUB_USERNAME/AP2" + echo " - Your authentication is set up (SSH keys or token)" + echo " - Your internet connection" + exit 1 +fi + +# Step 4: Push development branch to your fork +echo "โฌ†๏ธ Pushing development branch to your fork..." +git push -u origin ai-shopping-concierge + +# Step 5: Clone your product repository +echo "๐Ÿ“ฆ Setting up your product repository..." +PRODUCT_DIR="../$PRODUCT_REPO" + +if [[ -d "$PRODUCT_DIR" ]]; then + echo " โš ๏ธ Product repository directory already exists" + cd "$PRODUCT_DIR" + git pull origin main +else + echo " ๐Ÿ“ฅ Cloning your product repository..." + cd .. + git clone "https://github.com/$GITHUB_USERNAME/$PRODUCT_REPO.git" + cd "$PRODUCT_REPO" +fi + +# Initialize basic structure +echo "๐Ÿ—๏ธ Initializing product structure..." +mkdir -p {ai-shopping-agent,deployment,docs,examples,tests} +mkdir -p ai-shopping-agent/{whatsapp-integration,ai-curation,negotiation-engine,checkout-optimizer,analytics} + +# Create basic files if they don't exist +if [[ ! -f "README.md" ]]; then + cat > README.md << 'EOF' +# AI Shopping Concierge (AP2) + +An intelligent shopping assistant built on the AP2 (Agentic Protocol 2) platform. + +## Features +- ๐Ÿค– AI-powered product curation +- ๐Ÿ’ฌ Multi-channel chat (WhatsApp, Web) +- ๐Ÿ’ฐ Smart negotiation and bundling +- ๐Ÿ’ณ Automated payment processing with currency conversion +- ๐Ÿ“Š Advanced analytics and insights + +## Quick Start +```bash +# Install dependencies +pip install -r requirements.txt + +# Start the shopping agent +python -m ai_shopping_agent +``` + +## Documentation +- [Getting Started](docs/getting-started.md) +- [API Reference](docs/api-reference.md) +- [Deployment Guide](docs/deployment.md) + +## Built With +- [AP2 Protocol](https://github.com/google-agentic-commerce/AP2) - Core payment and commerce infrastructure +- FastAPI - Web framework +- Google AI - Language models +- WhatsApp Business API - Messaging + +## License +Apache 2.0 - see LICENSE file +EOF +fi + +# Create gitignore if it doesn't exist +if [[ ! -f ".gitignore" ]]; then + cat > .gitignore << 'EOF' +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# Virtual environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# IDE +.vscode/ +.idea/ +*.swp +*.swo + +# OS +.DS_Store +Thumbs.db + +# Project specific +config/secrets.yaml +logs/ +.coverage +htmlcov/ + +# AP2 Core (will be added as submodule) +ap2-core/ +EOF +fi + +echo "โœ… Product repository initialized" + +# Step 6: Add AP2 as submodule +echo "๐Ÿ”— Adding AP2 core as submodule..." +if [[ ! -d "ap2-core" ]]; then + git submodule add "https://github.com/$GITHUB_USERNAME/AP2.git" ap2-core + git submodule update --init --recursive + echo "โœ… AP2 core added as submodule" +else + echo " โš ๏ธ AP2 submodule already exists" +fi + +# Commit initial structure +if [[ -n "$(git status --porcelain)" ]]; then + git add . + git commit -m "Initial AI Shopping Concierge structure with AP2 submodule" + git push origin main + echo "โœ… Initial structure committed and pushed" +fi + +echo +echo "๐ŸŽ‰ SUCCESS! Repository setup completed!" +echo "======================================" +echo +echo "๐Ÿ“ Your repositories:" +echo " Fork: https://github.com/$GITHUB_USERNAME/AP2" +echo " Product: https://github.com/$GITHUB_USERNAME/$PRODUCT_REPO" +echo +echo "๐Ÿš€ Next steps:" +echo " 1. cd ../$PRODUCT_REPO" +echo " 2. Run: ../AP2/scripts/repository-setup/3-migrate-code.sh" +echo " 3. Start developing your AI Shopping Concierge!" +echo + +cd "../AP2" # Return to original directory +echo "๐Ÿ“ Returned to AP2 directory: $(pwd)" \ No newline at end of file diff --git a/scripts/repository-setup/3-migrate-code.sh b/scripts/repository-setup/3-migrate-code.sh new file mode 100644 index 00000000..c5a90e82 --- /dev/null +++ b/scripts/repository-setup/3-migrate-code.sh @@ -0,0 +1,515 @@ +#!/bin/bash + +# AI Shopping Concierge - Code Migration Script +# Migrates AI shopping agent code to the product repository + +set -e + +echo "๐Ÿ“ฆ AI Shopping Concierge - Code Migration" +echo "========================================" + +# Configuration +GITHUB_USERNAME="${1:-ankitap}" +PRODUCT_REPO="ai-shopping-concierge-ap2" +PRODUCT_DIR="../$PRODUCT_REPO" +AP2_DIR="$(pwd)" + +# Verify we're in the right place +if [[ ! -f "pyproject.toml" ]] || [[ ! -d ".git" ]]; then + echo "โŒ Please run this script from the AP2 repository root directory" + exit 1 +fi + +if [[ ! -d "$PRODUCT_DIR" ]]; then + echo "โŒ Product repository not found at: $PRODUCT_DIR" + echo " Please run: ./scripts/repository-setup/2-sync-and-verify.sh first" + exit 1 +fi + +echo "โœ… Directories verified" + +# Step 1: Copy AI Shopping Agent code +echo "๐Ÿ“‹ Migrating AI Shopping Agent code..." + +# Create target directories +cd "$PRODUCT_DIR" +mkdir -p ai-shopping-agent/{whatsapp-integration,ai-curation,negotiation-engine,checkout-optimizer,analytics,common} + +# Copy the enhanced modules we created +echo " ๐Ÿ“„ Copying enhanced modules..." + +# WhatsApp Integration +if [[ -f "$AP2_DIR/samples/python/src/channels/whatsapp_integration.py" ]]; then + cp "$AP2_DIR/samples/python/src/channels/whatsapp_integration.py" "ai-shopping-agent/whatsapp-integration/" + echo " โœ… WhatsApp integration copied" +fi + +# AI Curation +if [[ -f "$AP2_DIR/samples/python/src/ai_curation/smart_curation_engine.py" ]]; then + cp "$AP2_DIR/samples/python/src/ai_curation/smart_curation_engine.py" "ai-shopping-agent/ai-curation/" +fi + +if [[ -f "$AP2_DIR/samples/python/src/ai_curation/negotiation_engine.py" ]]; then + cp "$AP2_DIR/samples/python/src/ai_curation/negotiation_engine.py" "ai-shopping-agent/negotiation-engine/" +fi + +# Unified Chat Manager +if [[ -f "$AP2_DIR/samples/python/src/channels/unified_chat_manager.py" ]]; then + cp "$AP2_DIR/samples/python/src/channels/unified_chat_manager.py" "ai-shopping-agent/whatsapp-integration/" +fi + +# Checkout Optimizer +if [[ -f "$AP2_DIR/samples/python/src/optimization/checkout_optimizer.py" ]]; then + cp "$AP2_DIR/samples/python/src/optimization/checkout_optimizer.py" "ai-shopping-agent/checkout-optimizer/" + echo " โœ… Enhanced checkout optimizer with payment processing copied" +fi + +# Analytics +if [[ -f "$AP2_DIR/samples/python/src/analytics/performance_analytics.py" ]]; then + cp "$AP2_DIR/samples/python/src/analytics/performance_analytics.py" "ai-shopping-agent/analytics/" +fi + +# Common utilities +if [[ -d "$AP2_DIR/samples/python/src/common" ]]; then + cp -r "$AP2_DIR/samples/python/src/common/"* "ai-shopping-agent/common/" 2>/dev/null || true +fi + +echo " โœ… AI Shopping Agent modules migrated" + +# Step 2: Create main application entry point +echo "๐Ÿš€ Creating application entry point..." + +cat > ai-shopping-agent/__init__.py << 'EOF' +""" +AI Shopping Concierge +Built on AP2 Protocol + +An intelligent shopping assistant that provides: +- Multi-channel chat support (WhatsApp, Web) +- AI-powered product curation and recommendations +- Smart negotiation and dynamic pricing +- Automated payment processing with currency conversion +- Advanced analytics and insights +""" + +__version__ = "1.0.0" +__author__ = "AI Shopping Concierge Team" + +from .whatsapp_integration import WhatsAppShoppingAgent +from .smart_curation_engine import SmartCurationEngine +from .negotiation_engine import NegotiationEngine +from .checkout_optimizer import ConversionOptimizer +from .performance_analytics import AnalyticsEngine + +__all__ = [ + "WhatsAppShoppingAgent", + "SmartCurationEngine", + "NegotiationEngine", + "ConversionOptimizer", + "AnalyticsEngine" +] +EOF + +cat > ai-shopping-agent/__main__.py << 'EOF' +""" +AI Shopping Concierge - Main Application Entry Point +""" + +import asyncio +import logging +import sys +from pathlib import Path + +# Add the AP2 core to Python path +sys.path.insert(0, str(Path(__file__).parent.parent / "ap2-core" / "samples" / "python" / "src")) + +from ai_shopping_agent.whatsapp_integration.whatsapp_integration import WhatsAppShoppingAgent +from ai_shopping_agent.ai_curation.smart_curation_engine import SmartCurationEngine +from ai_shopping_agent.negotiation_engine.negotiation_engine import NegotiationEngine +from ai_shopping_agent.checkout_optimizer.checkout_optimizer import ConversionOptimizer +from ai_shopping_agent.analytics.performance_analytics import AnalyticsEngine + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) + +logger = logging.getLogger(__name__) + +class AIShoppingConcierge: + """Main AI Shopping Concierge application.""" + + def __init__(self): + self.whatsapp_agent = WhatsAppShoppingAgent() + self.curation_engine = SmartCurationEngine() + self.negotiation_engine = NegotiationEngine() + self.checkout_optimizer = ConversionOptimizer() + self.analytics_engine = AnalyticsEngine() + + async def start(self): + """Start the AI Shopping Concierge.""" + logger.info("๐Ÿš€ Starting AI Shopping Concierge...") + + # Initialize all components + await self.whatsapp_agent.initialize() + await self.curation_engine.initialize() + await self.negotiation_engine.initialize() + await self.analytics_engine.initialize() + + logger.info("โœ… AI Shopping Concierge started successfully!") + logger.info("๐Ÿ’ฌ WhatsApp integration ready") + logger.info("๐Ÿค– AI curation engine ready") + logger.info("๐Ÿ’ฐ Negotiation engine ready") + logger.info("๐Ÿ’ณ Checkout optimizer ready") + logger.info("๐Ÿ“Š Analytics engine ready") + + # Keep the application running + try: + while True: + await asyncio.sleep(1) + except KeyboardInterrupt: + logger.info("๐Ÿ›‘ Shutting down AI Shopping Concierge...") + + async def shutdown(self): + """Gracefully shutdown the application.""" + logger.info("๐Ÿ”„ Shutting down components...") + # Add cleanup logic here + logger.info("โœ… Shutdown complete") + +async def main(): + """Main application entry point.""" + app = AIShoppingConcierge() + try: + await app.start() + finally: + await app.shutdown() + +if __name__ == "__main__": + asyncio.run(main()) +EOF + +echo " โœ… Application entry point created" + +# Step 3: Create requirements.txt +echo "๐Ÿ“ฆ Creating requirements.txt..." + +cat > requirements.txt << 'EOF' +# AI Shopping Concierge Dependencies + +# Core framework +fastapi>=0.104.0 +uvicorn[standard]>=0.24.0 +pydantic>=2.5.0 + +# HTTP client +aiohttp>=3.9.0 +httpx>=0.25.0 + +# Google AI +google-generativeai>=0.3.0 + +# WhatsApp Business API +twilio>=8.10.0 + +# Data processing +pandas>=2.1.0 +numpy>=1.25.0 + +# Database +sqlalchemy>=2.0.0 +asyncpg>=0.29.0 # PostgreSQL +redis>=5.0.0 + +# Currency conversion +forex-python>=1.8 + +# Background tasks +celery>=5.3.0 + +# Monitoring and logging +prometheus-client>=0.19.0 +structlog>=23.2.0 + +# Testing +pytest>=7.4.0 +pytest-asyncio>=0.21.0 +httpx>=0.25.0 # For testing + +# Development +black>=23.11.0 +isort>=5.12.0 +mypy>=1.7.0 + +# Security +python-jose[cryptography]>=3.3.0 +passlib[bcrypt]>=1.7.4 + +# Configuration +python-dotenv>=1.0.0 +pyyaml>=6.0.1 + +# AP2 Core (as git submodule) +# See ap2-core/ directory +EOF + +echo " โœ… Requirements file created" + +# Step 4: Create configuration files +echo "โš™๏ธ Creating configuration files..." + +mkdir -p config + +cat > config/app.yaml << 'EOF' +# AI Shopping Concierge Configuration + +app: + name: "AI Shopping Concierge" + version: "1.0.0" + debug: false + host: "0.0.0.0" + port: 8000 + +# WhatsApp Configuration +whatsapp: + verify_token: "${WHATSAPP_VERIFY_TOKEN}" + access_token: "${WHATSAPP_ACCESS_TOKEN}" + phone_number_id: "${WHATSAPP_PHONE_NUMBER_ID}" + webhook_url: "${WHATSAPP_WEBHOOK_URL}" + +# Google AI Configuration +google_ai: + api_key: "${GOOGLE_AI_API_KEY}" + model: "gemini-pro" + max_tokens: 1000 + +# Database Configuration +database: + url: "${DATABASE_URL}" + echo: false + pool_size: 10 + max_overflow: 20 + +# Redis Configuration +redis: + url: "${REDIS_URL}" + max_connections: 20 + +# Payment Processing +payment: + default_currency: "USD" + supported_currencies: ["USD", "EUR", "GBP", "JPY", "CAD", "AUD", "CHF", "CNY", "INR", "BRL"] + + processors: + ap2: + merchant_id: "${AP2_MERCHANT_ID}" + api_endpoint: "https://ap2.googleapis.com/v1" + api_key: "${AP2_API_KEY}" + + stripe: + api_key: "${STRIPE_API_KEY}" + webhook_secret: "${STRIPE_WEBHOOK_SECRET}" + + paypal: + client_id: "${PAYPAL_CLIENT_ID}" + client_secret: "${PAYPAL_CLIENT_SECRET}" + +# Analytics +analytics: + enabled: true + retention_days: 90 + export_format: "json" + +# Security +security: + secret_key: "${SECRET_KEY}" + algorithm: "HS256" + access_token_expire_minutes: 30 + +# Logging +logging: + level: "INFO" + format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + file: "logs/app.log" +EOF + +cat > config/secrets.yaml.example << 'EOF' +# AI Shopping Concierge - Secrets Configuration +# Copy this file to secrets.yaml and fill in your actual values + +# WhatsApp Business API +WHATSAPP_VERIFY_TOKEN: "your_verify_token_here" +WHATSAPP_ACCESS_TOKEN: "your_access_token_here" +WHATSAPP_PHONE_NUMBER_ID: "your_phone_number_id_here" +WHATSAPP_WEBHOOK_URL: "https://your-domain.com/webhook/whatsapp" + +# Google AI +GOOGLE_AI_API_KEY: "your_google_ai_api_key_here" + +# Database +DATABASE_URL: "postgresql+asyncpg://user:password@localhost/ai_shopping_concierge" + +# Redis +REDIS_URL: "redis://localhost:6379/0" + +# Payment Processors +AP2_MERCHANT_ID: "your_ap2_merchant_id" +AP2_API_KEY: "your_ap2_api_key" +STRIPE_API_KEY: "sk_test_your_stripe_key" +STRIPE_WEBHOOK_SECRET: "whsec_your_webhook_secret" +PAYPAL_CLIENT_ID: "your_paypal_client_id" +PAYPAL_CLIENT_SECRET: "your_paypal_client_secret" + +# Security +SECRET_KEY: "your_super_secret_key_change_this_in_production" + +# Environment +ENVIRONMENT: "development" # development, staging, production +EOF + +echo " โœ… Configuration files created" + +# Step 5: Create Docker configuration +echo "๐Ÿณ Creating Docker configuration..." + +cat > Dockerfile << 'EOF' +# AI Shopping Concierge Dockerfile + +FROM python:3.11-slim + +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + git \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements first for better caching +COPY requirements.txt . + +# Install Python dependencies +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . . + +# Initialize AP2 submodule +RUN git submodule update --init --recursive + +# Create logs directory +RUN mkdir -p logs + +# Expose port +EXPOSE 8000 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:8000/health || exit 1 + +# Run application +CMD ["python", "-m", "ai_shopping_agent"] +EOF + +cat > docker-compose.yml << 'EOF' +# AI Shopping Concierge - Docker Compose + +version: '3.8' + +services: + ai-shopping-concierge: + build: . + ports: + - "8000:8000" + environment: + - DATABASE_URL=postgresql+asyncpg://postgres:password@db:5432/ai_shopping_concierge + - REDIS_URL=redis://redis:6379/0 + depends_on: + - db + - redis + volumes: + - ./config:/app/config + - ./logs:/app/logs + restart: unless-stopped + + db: + image: postgres:15 + environment: + POSTGRES_DB: ai_shopping_concierge + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + volumes: + - postgres_data:/var/lib/postgresql/data + ports: + - "5432:5432" + + redis: + image: redis:7-alpine + ports: + - "6379:6379" + volumes: + - redis_data:/data + + nginx: + image: nginx:alpine + ports: + - "80:80" + - "443:443" + volumes: + - ./deployment/nginx.conf:/etc/nginx/nginx.conf + - ./deployment/ssl:/etc/ssl + depends_on: + - ai-shopping-concierge + +volumes: + postgres_data: + redis_data: +EOF + +echo " โœ… Docker configuration created" + +# Step 6: Commit changes +echo "๐Ÿ’พ Committing migrated code..." + +# Add all files +git add . + +# Check if there are changes to commit +if [[ -n "$(git status --porcelain)" ]]; then + git commit -m "Migrate AI Shopping Concierge code from AP2 samples + +- Added enhanced WhatsApp integration +- Added AI curation and negotiation engines +- Added checkout optimizer with payment processing +- Added analytics engine +- Added application entry point and configuration +- Added Docker and deployment configuration" + + git push origin main + echo "โœ… Changes committed and pushed" +else + echo " โ„น๏ธ No changes to commit" +fi + +echo +echo "๐ŸŽ‰ SUCCESS! Code migration completed!" +echo "====================================" +echo +echo "๐Ÿ“ Your AI Shopping Concierge is ready at:" +echo " $(pwd)" +echo +echo "๐Ÿš€ Quick start:" +echo " 1. cp config/secrets.yaml.example config/secrets.yaml" +echo " 2. Edit config/secrets.yaml with your API keys" +echo " 3. pip install -r requirements.txt" +echo " 4. python -m ai_shopping_agent" +echo +echo "๐Ÿณ Or using Docker:" +echo " docker-compose up --build" +echo +echo "๐Ÿ“– Next: Create documentation with:" +echo " ../AP2/scripts/repository-setup/4-create-docs.sh" +echo + +# Return to AP2 directory +cd "$AP2_DIR" \ No newline at end of file