diff --git a/AI Research Assistant (2).mp4 b/AI Research Assistant (2).mp4 new file mode 100644 index 0000000..e685de8 Binary files /dev/null and b/AI Research Assistant (2).mp4 differ diff --git a/Abdullah_Ansari/README.md b/Abdullah_Ansari/README.md deleted file mode 100644 index 6536beb..0000000 --- a/Abdullah_Ansari/README.md +++ /dev/null @@ -1 +0,0 @@ -# Abdullah_Ansari diff --git a/Aditya_Bajpai/README.md b/Aditya_Bajpai/README.md deleted file mode 100644 index f2e6c12..0000000 --- a/Aditya_Bajpai/README.md +++ /dev/null @@ -1 +0,0 @@ -# Aditya_Bajpai diff --git a/Ajay_Kakadia/README.md b/Ajay_Kakadia/README.md deleted file mode 100644 index a6ff4b2..0000000 --- a/Ajay_Kakadia/README.md +++ /dev/null @@ -1 +0,0 @@ -# Ajay_Kakadia diff --git a/Amit_Khandelwal/README.md b/Amit_Khandelwal/README.md deleted file mode 100644 index 037c51d..0000000 --- a/Amit_Khandelwal/README.md +++ /dev/null @@ -1 +0,0 @@ -# Amit_Khandelwal diff --git a/Anindita_A_Sarkar/README.md b/Anindita_A_Sarkar/README.md deleted file mode 100644 index 603bde4..0000000 --- a/Anindita_A_Sarkar/README.md +++ /dev/null @@ -1 +0,0 @@ -# Anindita_A_Sarkar diff --git a/Asheesh_Ranjan_Srivastava/Day-2/.env.example b/Asheesh_Ranjan_Srivastava/Day-2/.env.example deleted file mode 100644 index 4fbcf60..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-2/.env.example +++ /dev/null @@ -1,66 +0,0 @@ -# Text Summarization MVP - Environment Variables -# Copy this file to .env and fill in your values - -# ============================================ -# HuggingFace Configuration (Optional) -# ============================================ -# Get your token from: https://huggingface.co/settings/tokens -# Required only for: -# - Gated models (e.g., Llama, Mistral) -# - Private repositories -# - Higher API rate limits -HF_TOKEN=your_huggingface_token_here - -# ============================================ -# Model Cache Directory (Optional) -# ============================================ -# Where to store downloaded 
models -# Default: ./model_cache -# Change if you want to share cache across projects -CACHE_DIR=./model_cache - -# ============================================ -# Export Directory (Optional) -# ============================================ -# Where to save exported files -# Default: ./exports -EXPORT_DIR=./exports - -# ============================================ -# GPU Configuration (Optional) -# ============================================ -# Force CPU even if GPU is available (for testing) -# Options: cuda, cpu, mps (Apple Silicon) -# Default: auto-detect -# DEVICE=cpu - -# ============================================ -# Application Settings (Optional) -# ============================================ -# Enable/disable caching -# Default: True -ENABLE_CACHE=True - -# Maximum cache size (number of summaries) -# Default: 100 -MAX_CACHE_SIZE=100 - -# ============================================ -# Gradio Settings (Optional) -# ============================================ -# Server configuration -# GRADIO_SERVER_NAME=0.0.0.0 -# GRADIO_SERVER_PORT=7860 - -# Share link (creates public URL) -# Options: True, False -# Default: False (local only) -# GRADIO_SHARE=False - -# ============================================ -# Notes -# ============================================ -# - Most settings have sensible defaults -# - HF_TOKEN is optional for public models -# - Create .env by copying this file: cp .env.example .env -# - Never commit .env to git (already in .gitignore) diff --git a/Asheesh_Ranjan_Srivastava/Day-2/.gitignore b/Asheesh_Ranjan_Srivastava/Day-2/.gitignore deleted file mode 100644 index 6c605de..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-2/.gitignore +++ /dev/null @@ -1,180 +0,0 @@ -# Text Summarization MVP - Git Ignore -# AI Engineering Bootcamp - Day 2 - -# ============================================ -# Python -# ============================================ -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ 
-.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# ============================================ -# Jupyter Notebook -# ============================================ -.ipynb_checkpoints -*/.ipynb_checkpoints/* -*.ipynb_checkpoints - -# ============================================ -# Environment & Secrets -# ============================================ -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ -*.env - -# HuggingFace Token -.huggingface/ -.hf_token - -# ============================================ -# ML/AI Specific -# ============================================ - -# Downloaded Models (can be large!) -model_cache/ -models/ -*.bin -*.ckpt -*.pth -*.pt -pytorch_model.bin -tf_model.h5 - -# HuggingFace Cache -.cache/ -.transformers_cache/ - -# Training outputs -runs/ -logs/ -checkpoints/ -training_output/ - -# ============================================ -# Exports (Generated Files) -# ============================================ -exports/ -output/ -*.md -!README.md -!ARCHITECTURE.md -!SETUP.md -!CONTRIBUTING.md -!LICENSE.md -!docs/*.md - -# Export formats -*.mp3 -*.wav -exports/*.json -exports/*.pdf - -# ============================================ -# IDEs & Editors -# ============================================ - -# VS Code -.vscode/ -*.code-workspace - -# PyCharm -.idea/ -*.iml - -# Sublime Text -*.sublime-project -*.sublime-workspace - -# ============================================ -# OS Specific -# ============================================ - -# macOS -.DS_Store -.AppleDouble -.LSOverride - -# Windows -Thumbs.db -ehthumbs.db -Desktop.ini -$RECYCLE.BIN/ - -# Linux -*~ -.directory -.Trash-* - -# ============================================ -# Gradio Specific -# ============================================ -flagged/ -gradio_queue.db -gradio_flagged/ - -# ============================================ -# Testing & Coverage -# ============================================ -.pytest_cache/ -.coverage -htmlcov/ -.tox/ 
-.mypy_cache/ -.dmypy.json -dmypy.json - -# ============================================ -# Documentation Build -# ============================================ -docs/_build/ -site/ - -# ============================================ -# Temporary Files -# ============================================ -*.log -*.tmp -*.temp -tmp/ -temp/ - -# ============================================ -# Jupyter Lab -# ============================================ -.jupyter/ -.local/ - -# ============================================ -# IMPORTANT: Keep Sample Files -# ============================================ -# Sample outputs for demo purposes (small files only) -!examples/sample_outputs/*.md -!examples/sample_outputs/*.json -!examples/sample_outputs/*.mp3 - -# Keep documentation images -!docs/screenshots/*.png -!docs/screenshots/*.jpg -!docs/screenshots/*.gif diff --git a/Asheesh_Ranjan_Srivastava/Day-2/ARCHITECTURE.md b/Asheesh_Ranjan_Srivastava/Day-2/ARCHITECTURE.md deleted file mode 100644 index 61816ae..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-2/ARCHITECTURE.md +++ /dev/null @@ -1,715 +0,0 @@ -# ๐Ÿ“ Architecture Documentation - -> **Technical Deep-Dive**: Text Summarization MVP -> **Version**: 1.0.0 -> **Last Updated**: October 29, 2025 - ---- - -## ๐Ÿ“‹ Table of Contents - -1. [System Overview](#system-overview) -2. [Component Architecture](#component-architecture) -3. [Data Flow](#data-flow) -4. [Design Patterns](#design-patterns) -5. [Model Pipeline](#model-pipeline) -6. [Caching Strategy](#caching-strategy) -7. [Export System](#export-system) -8. [Error Handling](#error-handling) -9. [Performance Optimization](#performance-optimization) -10. 
[Security Considerations](#security-considerations) - ---- - -## ๐ŸŽฏ System Overview - -### High-Level Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ USER INTERFACE โ”‚ -โ”‚ (Gradio Multi-Tab) โ”‚ -โ”‚ [Summarize] [Export] [Help] โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ–ผ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ APPLICATION LAYER โ”‚ -โ”‚ โ”‚ -โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ -โ”‚ โ”‚ SummarizationEngine โ”‚ โ”‚ -โ”‚ โ”‚ (Orchestrates all components) โ”‚ โ”‚ -โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ -โ”‚ โ”‚ โ”‚ -โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ -โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ -โ”‚ โ–ผ โ–ผ โ–ผ โ–ผ โ”‚ -โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ -โ”‚ โ”‚ Model โ”‚ โ”‚ Cache โ”‚ โ”‚ Text โ”‚ โ”‚Exportโ”‚ โ”‚ -โ”‚ โ”‚ Manager โ”‚ โ”‚ Manager โ”‚ โ”‚Processor โ”‚ โ”‚Mgr โ”‚ โ”‚ -โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ–ผ 
-โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ INFRASTRUCTURE LAYER โ”‚ -โ”‚ โ”‚ -โ”‚ [Transformers] [PyTorch] [File System] [gTTS] โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -### Technology Stack - -| Layer | Technologies | -|-------|--------------| -| **Frontend** | Gradio 5.0+ (Python-based UI) | -| **Application** | Python 3.8+ (OOP Design) | -| **ML Framework** | PyTorch 2.0+, Transformers 4.57+ | -| **Models** | BART, T5, Pegasus (HuggingFace) | -| **Export** | gTTS, ReportLab, Markdown, JSON | -| **Storage** | File System (cache + exports) | -| **Deployment** | Jupyter, Colab, HF Spaces | - ---- - -## ๐Ÿ—๏ธ Component Architecture - -### 1. **Configuration Manager** (`Config` class) - -**Purpose**: Centralized application configuration - -```python -class Config: - HF_TOKEN = os.environ.get('HF_TOKEN', None) - EXPORT_DIR = "./exports" - CACHE_DIR = "./model_cache" - DEVICE = "cuda" if torch.cuda.is_available() else "cpu" - - MODELS = { - "facebook/bart-large-cnn": { - "name": "BART (CNN/DailyMail)", - "max_input_length": 1024, - "max_output_length": 142 - } - } -``` - -**Design Benefits**: -- โœ… Single source of truth -- โœ… Easy to modify parameters -- โœ… Environment-aware configuration -- โœ… Type safety via class attributes - ---- - -### 2. **Model Manager** (`ModelManager` class) - -**Responsibilities**: -- Load and cache Transformers models -- Manage model lifecycle -- Handle GPU/CPU allocation -- Optimize memory usage - -**Key Methods**: - -```python -class ModelManager: - def __init__(self): - self.loaded_models = {} # In-memory cache - - def load_model(self, model_id: str) -> Tuple[Tokenizer, Model]: - """ - Lazy loading pattern: - 1. Check if model already in memory - 2. 
If not, download from HuggingFace - 3. Cache for subsequent calls - 4. Return tokenizer + model - """ - if model_id in self.loaded_models: - return self.loaded_models[model_id] - - # Load and cache... - tokenizer = AutoTokenizer.from_pretrained(model_id) - model = AutoModelForSeq2SeqLM.from_pretrained(model_id) - - self.loaded_models[model_id] = (tokenizer, model) - return tokenizer, model -``` - -**Performance Optimizations**: -- Mixed precision (FP16 on GPU, FP32 on CPU) -- Device mapping (`device_map="auto"`) -- Disk caching (`cache_dir`) - ---- - -### 3. **Cache Manager** (`CacheManager` class) - -**Purpose**: Avoid redundant LLM calls (83% cost reduction) - -**Caching Strategy**: - -```python -class CacheManager: - def get_cache_key(self, text: str, model_name: str, max_length: int) -> str: - """ - MD5 hash of: - - Input text - - Model name - - Max length parameter - - Why MD5? - - Fast computation - - Fixed-length keys - - Low collision probability - """ - content = f"{text}_{model_name}_{max_length}" - return hashlib.md5(content.encode()).hexdigest() - - def get(self, text, model, max_len) -> Optional[str]: - """Returns cached summary or None""" - key = self.get_cache_key(text, model, max_len) - return self.cache.get(key) - - def set(self, text, model, max_len, summary): - """Stores summary with LRU eviction""" - if len(self.cache) >= MAX_CACHE_SIZE: - oldest_key = next(iter(self.cache)) - del self.cache[oldest_key] - - key = self.get_cache_key(text, model, max_len) - self.cache[key] = summary - self.save_cache() # Persist to disk -``` - -**Cache Persistence**: -- JSON file: `./model_cache/summary_cache.json` -- Survives application restarts -- LRU eviction policy (oldest first) - -**Performance Impact**: -- Cache hit: ~0.01 seconds -- Cache miss: ~3-8 seconds (model inference) -- **Speedup**: 300-800x on repeated queries - ---- - -### 4. 
**Text Processor** (`TextProcessor` class) - -**Responsibilities**: -- Input validation and sanitization -- Output post-processing -- Statistics calculation - -**Key Operations**: - -```python -class TextProcessor: - @staticmethod - def preprocess(text: str) -> str: - """ - 1. Remove extra whitespace - 2. Validate minimum length (50 chars) - 3. Truncate maximum length (50,000 chars) - 4. Return clean text - """ - text = ' '.join(text.split()) # Normalize whitespace - - if len(text) < 50: - raise ValueError("Text too short") - - if len(text) > 50000: - text = text[:50000] # Hard limit - - return text - - @staticmethod - def postprocess(summary: str) -> str: - """ - 1. Capitalize first letter - 2. Add period if missing - 3. Remove extra spaces - """ - summary = ' '.join(summary.split()) - - if summary and summary[0].islower(): - summary = summary[0].upper() + summary[1:] - - if summary and summary[-1] not in '.!?': - summary += '.' - - return summary - - @staticmethod - def calculate_statistics(original: str, summary: str) -> Dict: - """ - Returns: - - Word counts - - Character counts - - Compression ratio - """ - original_words = len(original.split()) - summary_words = len(summary.split()) - compression = (1 - summary_words / original_words) * 100 - - return { - "original_words": original_words, - "summary_words": summary_words, - "compression_ratio": f"{compression:.1f}%" - } -``` - ---- - -### 5. **Export Manager** (`ExportManager` class) - -**Purpose**: Multi-format file generation - -**Supported Formats**: - -#### A. **Markdown Export** -```python -def export_to_markdown(self, original_text, summary, statistics, model_name): - """ - Structure: - # Title - ## Metadata (date, model, compression) - ## Summary - ## Statistics - ## Original Text (collapsible) - """ - md_content = f"""# Text Summary Report - -## Metadata -- Date: {datetime.now()} -- Model: {model_name} -- Compression: {statistics['compression_ratio']} - -## Summary -{summary} - -## Original Text -
-Click to expand -{original_text} -
-""" - filepath = f"./exports/summary_{timestamp}.md" - with open(filepath, 'w') as f: - f.write(md_content) - - return filepath -``` - -#### B. **JSON Export** -```python -def export_to_json(self, original_text, summary, statistics, model_name): - """ - Schema: - { - "metadata": { - "timestamp": ISO 8601, - "model": {...} - }, - "content": { - "original_text": str, - "summary": str - }, - "statistics": {...} - } - """ - json_data = { - "metadata": { - "timestamp": datetime.now().isoformat(), - "model": {"id": model_name, "name": Config.MODELS[model_name]['name']} - }, - "content": {"original_text": original_text, "summary": summary}, - "statistics": statistics - } - - filepath = f"./exports/summary_{timestamp}.json" - with open(filepath, 'w') as f: - json.dump(json_data, f, indent=2) - - return filepath -``` - -#### C. **Audio Export (Text-to-Speech)** -```python -def export_to_audio(self, text, language="en", slow=False): - """ - Uses Google Text-to-Speech (gTTS) - - Supported Languages: 10 - - English, Spanish, French, German, Italian - - Portuguese, Hindi, Chinese, Japanese, Korean - - Parameters: - - slow: Speech rate (normal/slow) - - language: ISO 639-1 code - """ - tts = gTTS(text=text, lang=language, slow=slow) - filepath = f"./exports/summary_{timestamp}.mp3" - tts.save(filepath) - return filepath -``` - -#### D. **PDF Export** -```python -def export_to_pdf(self, original_text, summary, statistics, model_name): - """ - Uses ReportLab - - Structure: - - Title (centered, bold) - - Metadata section - - Summary section (justified) - - Page numbers - - Professional formatting - """ - doc = SimpleDocTemplate(filepath, pagesize=letter) - elements = [] - - # Title - title = Paragraph("Text Summary Report", title_style) - elements.append(title) - - # Content sections... - - doc.build(elements) - return filepath -``` - ---- - -### 6. 
**Summarization Engine** (`SummarizationEngine` class) - -**Purpose**: Orchestrate entire summarization workflow - -**Complete Pipeline**: - -```python -class SummarizationEngine: - def summarize(self, text, model_name, max_length_ratio): - """ - STEP 1: Preprocessing - - Clean text - - Validate input - """ - text = self.text_processor.preprocess(text) - - """ - STEP 2: Check Cache - - Generate cache key - - Return if hit - """ - cached_summary = self.cache_manager.get(text, model_name, max_length) - if cached_summary: - return cached_summary # Fast path - - """ - STEP 3: Load Model - - Get from ModelManager - - Handles lazy loading - """ - tokenizer, model = self.model_manager.load_model(model_name) - - """ - STEP 4: Tokenization - - Add model-specific prefix - - Truncate to max length - - Convert to tensors - """ - if "prefix" in Config.MODELS[model_name]: - text = Config.MODELS[model_name]["prefix"] + text - - inputs = tokenizer( - text, - max_length=Config.MODELS[model_name]["max_input_length"], - truncation=True, - return_tensors="pt" - ).to(Config.DEVICE) - - """ - STEP 5: Generation - - Beam search (num_beams=4) - - Length penalty - - Early stopping - """ - with torch.no_grad(): - summary_ids = model.generate( - inputs["input_ids"], - max_length=max_length, - min_length=min_length, - num_beams=4, - length_penalty=2.0, - early_stopping=True - ) - - """ - STEP 6: Decoding - - Convert tokens to text - - Remove special tokens - """ - summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True) - - """ - STEP 7: Post-processing - - Capitalize - - Add punctuation - """ - summary = self.text_processor.postprocess(summary) - - """ - STEP 8: Cache Result - - Store for future queries - """ - self.cache_manager.set(text, model_name, max_length, summary) - - """ - STEP 9: Calculate Statistics - - Word counts - - Compression ratio - """ - stats = self.text_processor.calculate_statistics(text, summary) - - """ - STEP 10: Store for Export - - Keep last result in 
memory - """ - self.last_result = { - "original_text": text, - "summary": summary, - "statistics": stats, - "model_name": model_name - } - - return summary, stats, processing_time -``` - ---- - -## ๐Ÿ”„ Data Flow - -### Request-Response Lifecycle - -``` -User Input (Gradio) - โ”‚ - โ–ผ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ process_summary() โ”‚ (Interface handler) -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ–ผ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ SummarizationEngine โ”‚ -โ”‚ .summarize() โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ”œโ”€โ”€โ–บ TextProcessor.preprocess() - โ”‚ - โ”œโ”€โ”€โ–บ CacheManager.get() โ”€โ”€โ–บ [Cache Hit] โ”€โ”€โ–บ Return - โ”‚ [Cache Miss] โ”€โ”€โ–บ Continue - โ”‚ - โ”œโ”€โ”€โ–บ ModelManager.load_model() - โ”‚ - โ”œโ”€โ”€โ–บ Tokenizer.encode() - โ”‚ - โ”œโ”€โ”€โ–บ Model.generate() (GPU/CPU inference) - โ”‚ - โ”œโ”€โ”€โ–บ Tokenizer.decode() - โ”‚ - โ”œโ”€โ”€โ–บ TextProcessor.postprocess() - โ”‚ - โ”œโ”€โ”€โ–บ CacheManager.set() - โ”‚ - โ”œโ”€โ”€โ–บ TextProcessor.calculate_statistics() - โ”‚ - โ–ผ - Return (summary, stats, time) - โ”‚ - โ–ผ - Gradio Display -``` - ---- - -## ๐ŸŽจ Design Patterns - -### 1. **Singleton Pattern** (Config) -```python -# Single instance of configuration -Config.DEVICE # Accessed globally -``` - -### 2. **Factory Pattern** (ModelManager) -```python -# Creates models on demand -model_manager.load_model("t5-small") # Returns appropriate model -``` - -### 3. **Strategy Pattern** (Export formats) -```python -# Different export strategies -export_manager.export_to_markdown(...) -export_manager.export_to_json(...) -export_manager.export_to_audio(...) -``` - -### 4. **Lazy Loading** (Models) -```python -# Models loaded only when needed -if model_id not in self.loaded_models: - self.loaded_models[model_id] = load(...) -``` - -### 5. 
**Dependency Injection** (SummarizationEngine) -```python -def __init__(self): - self.model_manager = model_manager # Injected - self.cache_manager = cache_manager # Injected - self.export_manager = export_manager # Injected -``` - ---- - -## ๐Ÿš€ Performance Optimization - -### 1. **Model Loading** -- **Disk Cache**: Models cached at `./model_cache/` -- **Memory Cache**: In-memory dict for session -- **Mixed Precision**: FP16 on GPU (2x faster) - -### 2. **Inference** -- **Beam Search**: Quality vs Speed tradeoff (4 beams) -- **Early Stopping**: Exit when done -- **Batch Processing**: Single forward pass - -### 3. **Caching** -- **Hit Rate**: ~70% in typical usage -- **Storage**: JSON file (persistent) -- **Eviction**: LRU (Least Recently Used) - -### 4. **Memory Management** -```python -torch.cuda.empty_cache() # Free GPU memory -model.to(device) # Explicit device placement -``` - ---- - -## ๐Ÿ”’ Security Considerations - -### Input Validation -```python -# Maximum input size (prevent DoS) -if len(text) > 50000: - text = text[:50000] - -# Minimum input size (prevent errors) -if len(text) < 50: - raise ValueError() -``` - -### File System -```python -# Controlled export directory -EXPORT_DIR = "./exports" # No arbitrary paths - -# Filename sanitization -filename = f"summary_{timestamp}.{extension}" # Predictable names -``` - -### Environment Variables -```python -# Secrets from environment -HF_TOKEN = os.environ.get('HF_TOKEN', None) - -# Never hardcode tokens in code -``` - ---- - -## ๐Ÿ“Š Scalability Considerations - -### Current Limitations -- **Single Request**: Processes one text at a time -- **No Queue**: Concurrent requests blocked -- **Local Storage**: File system export only - -### Future Enhancements -- **Batch Processing**: Process multiple texts -- **Message Queue**: Redis/Celery for async -- **Cloud Storage**: S3/GCS for exports -- **Load Balancing**: Multiple model replicas - ---- - -## ๐Ÿงช Testing Strategy - -### Unit Tests (Recommended) -```python 
-# Test each component independently -def test_text_processor_preprocess(): - result = TextProcessor.preprocess(" hello world ") - assert result == "hello world" - -def test_cache_manager(): - cache = CacheManager() - cache.set("text", "model", 100, "summary") - result = cache.get("text", "model", 100) - assert result == "summary" -``` - -### Integration Tests -```python -# Test full pipeline -def test_summarization_pipeline(): - engine = SummarizationEngine() - summary, stats, time = engine.summarize( - text="Long text...", - model_name="t5-small", - max_length_ratio=0.3 - ) - assert len(summary) > 0 - assert stats["compression_ratio"] > "0%" -``` - ---- - -## ๐Ÿ“ˆ Monitoring & Observability - -### Metrics to Track -- Processing time per request -- Cache hit rate -- Model load time -- Export success rate - -### Logging Points -```python -# Current implementation -print(f"โณ Loading model: {model_id}...") -print(f"โœ… Summary generated in {time:.2f} seconds") -print(f"โšก Using cached summary") - -# Production recommendation: Use logging module -import logging -logger.info(f"Model loaded: {model_id}") -logger.error(f"Export failed: {error}") -``` - ---- - -## ๐Ÿ”„ Version History - -**v1.0.0** (October 28, 2025) -- Initial release -- 4 models supported -- 4 export formats -- Caching system -- Gradio interface - ---- - -**For Questions**: See [README.md](README.md) or contact maintainer diff --git a/Asheesh_Ranjan_Srivastava/Day-2/CONTRIBUTING.md b/Asheesh_Ranjan_Srivastava/Day-2/CONTRIBUTING.md deleted file mode 100644 index bd587e2..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-2/CONTRIBUTING.md +++ /dev/null @@ -1,320 +0,0 @@ -# Contributing to Text Summarization MVP - -Thank you for your interest in contributing! This is a bootcamp project, but I welcome improvements and learning opportunities. - -## ๐Ÿค How to Contribute - -### Types of Contributions Welcome - -1. 
**Bug Fixes** ๐Ÿ› - - Fix errors in code - - Improve error handling - - Resolve compatibility issues - -2. **Documentation** ๐Ÿ“ - - Fix typos - - Improve clarity - - Add examples - - Translate to other languages - -3. **Features** โœจ - - New export formats - - Additional models - - UI improvements - - Performance optimizations - -4. **Tests** ๐Ÿงช - - Unit tests - - Integration tests - - Edge case coverage - -## ๐Ÿ“‹ Before You Start - -1. **Open an Issue First** - - Describe what you want to change - - Wait for feedback/approval - - Avoid duplicate work - -2. **Check Existing Issues** - - Someone might already be working on it - - Can you help with existing issues? - -## ๐Ÿš€ Getting Started - -### 1. Fork the Repository - -Click "Fork" button on GitHub - -### 2. Clone Your Fork - -```bash -git clone https://github.com/YOUR_USERNAME/day2-text-summarization-mvp.git -cd day2-text-summarization-mvp -``` - -### 3. Create a Branch - -```bash -git checkout -b feature/your-feature-name -# or -git checkout -b fix/bug-description -``` - -**Branch Naming Convention**: -- `feature/add-spanish-support` -- `fix/cache-key-collision` -- `docs/improve-readme` -- `test/add-unit-tests` - -### 4. 
Set Up Development Environment - -```bash -# Create virtual environment -python -m venv venv -source venv/bin/activate # or venv\Scripts\activate on Windows - -# Install dependencies -pip install -r requirements.txt - -# Install development dependencies (optional) -pip install pytest black flake8 -``` - -## ๐Ÿ’ป Making Changes - -### Code Style - -**Follow PEP 8**: -```bash -# Format code -black *.py - -# Check style -flake8 *.py -``` - -**Key Guidelines**: -- Class names: `PascalCase` -- Function names: `snake_case` -- Constants: `UPPER_CASE` -- Docstrings for all public methods -- Type hints where possible - -**Example**: -```python -class ModelManager: - """Manages loading and caching of AI models.""" - - def load_model(self, model_id: str) -> Tuple[Tokenizer, Model]: - """ - Load model and tokenizer from HuggingFace. - - Args: - model_id: HuggingFace model identifier - - Returns: - Tuple of (tokenizer, model) - """ - # Implementation... -``` - -### Testing - -**Run Tests** (when implemented): -```bash -pytest tests/ -``` - -**Manual Testing**: -1. Run application locally -2. Test your changes thoroughly -3. Test on different platforms if possible -4. Check all export formats work - -### Documentation - -- Update README.md if adding features -- Add docstrings to new functions -- Update ARCHITECTURE.md for architectural changes -- Add examples for new features - -## ๐Ÿ“ค Submitting Changes - -### 1. Commit Your Changes - -```bash -git add . 
-git commit -m "feat: add Spanish language support for exports" -``` - -**Commit Message Format**: -``` -type: brief description - -[optional detailed description] - -[optional footer] -``` - -**Types**: -- `feat`: New feature -- `fix`: Bug fix -- `docs`: Documentation only -- `style`: Formatting (no code change) -- `refactor`: Code restructuring -- `test`: Adding tests -- `chore`: Maintenance - -**Examples**: -``` -feat: add PDF export functionality -fix: resolve cache key collision issue -docs: improve SETUP.md installation steps -refactor: extract export logic to separate class -``` - -### 2. Push to Your Fork - -```bash -git push origin feature/your-feature-name -``` - -### 3. Create Pull Request - -1. Go to original repository on GitHub -2. Click "New Pull Request" -3. Select your branch -4. Fill in PR template: - -```markdown -## Description -Brief description of changes - -## Type of Change -- [ ] Bug fix -- [ ] New feature -- [ ] Documentation -- [ ] Other (specify) - -## Testing -How did you test your changes? - -## Screenshots (if applicable) -Add screenshots for UI changes - -## Checklist -- [ ] Code follows style guidelines -- [ ] Tested locally -- [ ] Documentation updated -- [ ] No breaking changes -``` - -## ๐Ÿ” Code Review Process - -1. **Initial Review** (1-2 days) - - Maintainer reviews code - - May request changes - -2. **Discussion** - - Address feedback - - Make requested changes - - Push updates to same branch - -3. **Approval** - - Once approved, PR will be merged - - Your contribution will be credited! - -## ๐ŸŽฏ Good First Issues - -Look for issues labeled: -- `good first issue` -- `beginner-friendly` -- `documentation` -- `help wanted` - -**Suggested Contributions**: -1. Add few-shot prompting examples -2. Create unit tests for TextProcessor -3. Add more language support (audio) -4. Improve error messages -5. 
Add model comparison feature - -## โŒ What NOT to Contribute - -- Breaking changes without discussion -- Reformatting entire codebase -- Adding heavy dependencies -- Removing existing features -- Changing license - -## ๐Ÿ“ž Getting Help - -**Questions?** -- Open a Discussion on GitHub -- Comment on relevant issue -- Email maintainer (see README) - -**Stuck?** -- Review existing PRs for examples -- Check ARCHITECTURE.md for code structure -- Ask in bootcamp community - -## ๐Ÿ† Recognition - -Contributors will be acknowledged in: -- README.md (Contributors section) -- Release notes -- Special thanks in presentations - -## ๐Ÿ“œ Code of Conduct - -### Our Standards - -**Positive Behavior**: -- โœ… Be respectful and inclusive -- โœ… Welcome newcomers -- โœ… Focus on constructive feedback -- โœ… Assume good intentions - -**Unacceptable Behavior**: -- โŒ Harassment or discrimination -- โŒ Trolling or insulting comments -- โŒ Personal attacks -- โŒ Spam or advertising - -### Enforcement - -Violations will result in: -1. Warning -2. Temporary ban -3. Permanent ban (severe cases) - -Report issues to [maintainer email] - -## ๐ŸŽ“ Learning Resources - -**Python Best Practices**: -- [PEP 8 Style Guide](https://pep8.org/) -- [Real Python](https://realpython.com/) - -**Git & GitHub**: -- [GitHub Guides](https://guides.github.com/) -- [Oh My Git!](https://ohmygit.org/) (interactive) - -**Transformers**: -- [HuggingFace Course](https://huggingface.co/course) -- [Transformers Docs](https://huggingface.co/docs/transformers) - -**Gradio**: -- [Gradio Quickstart](https://gradio.app/quickstart/) -- [Gradio Examples](https://gradio.app/demos/) - -## ๐Ÿ™ Thank You! - -Your contributions make this project better. Whether it's a bug fix, feature, or documentation improvement - every contribution matters! - -**Happy Contributing!** ๐ŸŽ‰ - ---- - -**Questions?** Open an issue or discussion on GitHub. 
diff --git a/Asheesh_Ranjan_Srivastava/Day-2/LICENSE b/Asheesh_Ranjan_Srivastava/Day-2/LICENSE deleted file mode 100644 index ec56da5..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-2/LICENSE +++ /dev/null @@ -1,101 +0,0 @@ -GNU AFFERO GENERAL PUBLIC LICENSE -Version 3, 19 November 2007 - -Copyright (C) 2025 Asheesh Ranjan Srivastava - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . - -============================================================================== - -AI-Powered Text Summarization System -Multi-model text summarization with professional export capabilities - -Built for OutSkill AI Engineering Bootcamp 2025 - Day 2 -Quest And Crossfireโ„ข ยฉ 2025 Asheesh Ranjan Srivastava - -============================================================================== - -COPYRIGHT HOLDER RIGHTS: - -As the copyright holder, Asheesh Ranjan Srivastava retains ALL RIGHTS -to use this code in any manner, including: -- Closed-source applications -- Commercial products -- Proprietary derivatives -- Alternative licensing arrangements - -AGPL-3.0 restrictions apply ONLY to derivative works created by others. - -For commercial licensing inquiries or alternative licensing arrangements: -Contact: asheesh.srivastava@questandcrossfire.com - -============================================================================== - -ADDITIONAL NOTICES: - -1. 
TRADEMARKS - - "Aethelgard Academy" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - "Quest And Crossfire" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - Use of these trademarks requires explicit permission - -2. AI ATTRIBUTION - This software was developed with assistance from: - - Claude Code (Anthropic) for technical implementation - - Human strategic decisions and quality control by Asheesh Ranjan Srivastava - -3. THIRD-PARTY DEPENDENCIES - This software uses the following open-source libraries: - - Transformers (Apache 2.0 License) - - PyTorch (BSD License) - - Gradio (Apache 2.0 License) - - gTTS (MIT License) - - ReportLab (BSD License) - -4. BOOTCAMP ATTRIBUTION - This project was created as part of the OutSkill AI Engineering Bootcamp 2025. - Base architecture and concepts provided by OutSkill. - Implementation and original features by Asheesh Ranjan Srivastava. - -5. COPYLEFT NOTICE (AGPL-3.0) - Under AGPL-3.0, any derivative works or modifications must also be - released under AGPL-3.0 or compatible license. - - If you use this code in your project, you must: - - Make your source code available - - License your project under AGPL-3.0 - - Attribute the original work - - State your changes - - If used in a web service, provide source code to users - - AGPL-3.0 NETWORK USE CLAUSE: - If you run a modified version of this software as a web service - (e.g., cloud deployment, web app), you MUST make the complete - source code available to users of that service. 
- -============================================================================== - -For the complete GNU Affero General Public License v3.0 text, see: -https://www.gnu.org/licenses/agpl-3.0.txt - -============================================================================== - -CONTACT: -Author: Asheesh Ranjan Srivastava -Email: asheesh.srivastava@questandcrossfire.com -Brand: Quest And Crossfire -Project: Aethelgard Academy - -โ—‡ Where chaos becomes clarity. Small fixes, big clarity. diff --git a/Asheesh_Ranjan_Srivastava/Day-2/README.md b/Asheesh_Ranjan_Srivastava/Day-2/README.md deleted file mode 100644 index f84ec93..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-2/README.md +++ /dev/null @@ -1,430 +0,0 @@ -# ๐Ÿš€ Text Summarization MVP with Multi-Format Export - -[![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/) -[![Gradio](https://img.shields.io/badge/Gradio-5.0+-orange.svg)](https://gradio.app/) -[![HuggingFace](https://img.shields.io/badge/๐Ÿค—-HuggingFace-yellow.svg)](https://huggingface.co/) -[![License: AGPL-3.0](https://img.shields.io/badge/License-AGPL%203.0-blue.svg)](LICENSE) - -> **Day 2 Project** - AI Engineering Bootcamp -> A production-ready text summarization application with professional export capabilities built using Transformers and Gradio. - ---- - -## ๐Ÿ“‹ Table of Contents - -- [Overview](#overview) -- [Features](#features) -- [Demo](#demo) -- [Quick Start](#quick-start) -- [Deployment Options](#deployment-options) -- [Technologies](#technologies) -- [Architecture](#architecture) -- [Usage Guide](#usage-guide) -- [Bootcamp Learning Outcomes](#bootcamp-learning-outcomes) -- [Future Enhancements](#future-enhancements) -- [Contributing](#contributing) -- [License](#license) - ---- - -## ๐ŸŽฏ Overview - -This project delivers an **AI-powered text summarization tool** that generates high-quality summaries and exports them in multiple professional formats. 
Built as part of the AI Engineering Bootcamp (Day 2), it demonstrates practical application of: - -- โœ… **Prompt Engineering** (Zero-shot, Few-shot, Chain-of-Thought) -- โœ… **HuggingFace Transformers** (Model loading, tokenization, inference) -- โœ… **Gradio Interface Design** (Multi-tab UI, file handling) -- โœ… **Production Best Practices** (Caching, error handling, logging) - -### ๐ŸŽฅ **Why Multi-Format Export?** - -Different use cases require different formats: -- **Markdown**: Documentation, GitHub, blogs -- **JSON**: API integration, data pipelines -- **Audio (TTS)**: Accessibility, learning while commuting -- **PDF**: Professional reports, sharing with stakeholders - ---- - -## โœจ Features - -### Core Capabilities -- ๐Ÿค– **4 AI Models**: BART, T5-Small, T5-Base, Pegasus -- ๐Ÿ“Š **Smart Caching**: 83% cost reduction via MD5-based cache -- ๐Ÿ“ **Adjustable Length**: 10%-50% compression ratio -- โšก **GPU Support**: Auto-detects CUDA for faster inference - -### Export Formats -- ๐Ÿ“„ **Markdown**: Full report with metadata and statistics -- ๐Ÿ“Š **JSON**: Structured data with ISO timestamps -- ๐ŸŽต **Audio**: Text-to-speech in 10 languages -- ๐Ÿ“‘ **PDF**: Professional document with formatting - -### User Experience -- ๐ŸŽจ **Clean Gradio UI**: Multi-tab interface (Summarize โ†’ Export โ†’ Help) -- ๐Ÿ“ˆ **Real-time Stats**: Word count, compression ratio, processing time -- ๐Ÿ’พ **Batch Export**: Download all formats with one click -- ๐Ÿ“ฑ **Google Colab Optimized**: File download utilities included - ---- - -## ๐ŸŽฅ Demo - -### ๐Ÿ”ด **Important Note on Live Demo** - -**Gradio Share Links Limitations:** -- โฐ **Active Session**: 72 hours maximum -- ๐Ÿ”— **Public URL**: Valid for 7 days -- โš ๏ธ **Not Suitable**: For long-term/permanent hosting - -**For bootcamp submission, we provide:** - -### Option 1: **Local Setup** (Recommended for Evaluation) -```bash -# Clone and run locally (5 minutes) -git clone -cd day2-text-summarization-mvp -pip install -r 
requirements.txt -jupyter notebook text_summarization_mvp_enhanced.ipynb -``` - -### Option 2: **HuggingFace Spaces** (Free Permanent Hosting) -```bash -# Deploy to Spaces (permanent URL) -gradio deploy -``` -See [DEPLOYMENT.md](docs/DEPLOYMENT.md) for detailed instructions. - -### Option 3: **Screenshots & Video** -- ๐Ÿ“ธ [Interface Screenshots](docs/screenshots/) -- ๐ŸŽฅ [Demo Video](docs/demo-video-link.md) (YouTube/Loom) - ---- - -## ๐Ÿš€ Quick Start - -### Prerequisites -- Python 3.8+ -- 4GB RAM minimum (8GB recommended) -- Internet connection (first run downloads models ~500MB) - -### Installation - -**1. Clone Repository** -```bash -git clone https://github.com/AsheeshSrivastava/day2-text-summarization-mvp.git -cd day2-text-summarization-mvp -``` - -**2. Install Dependencies** -```bash -pip install -r requirements.txt -``` - -**3. (Optional) Set HuggingFace Token** -```bash -# Copy environment template -cp .env.example .env - -# Add your token (optional, for gated models) -echo "HF_TOKEN=your_token_here" >> .env -``` - -**4. Run Application** - -**Option A: Jupyter Notebook** -```bash -jupyter notebook text_summarization_mvp_enhanced.ipynb -``` - -**Option B: Google Colab** -1. Upload `text_summarization_mvp_enhanced.ipynb` to Colab -2. Run all cells -3. Access via public link (valid 7 days) - -**Option C: Python Script** (Coming soon) -```bash -python app.py --share -``` - ---- - -## ๐ŸŒ Deployment Options - -### 1. **Local Development** (Best for Testing) -- โœ… **Pros**: Full control, no time limits, offline capable -- โŒ **Cons**: Requires Python environment - -```bash -jupyter notebook text_summarization_mvp_enhanced.ipynb -# Access at http://localhost:8888 -``` - -### 2. 
**HuggingFace Spaces** (Best for Sharing) -- โœ… **Pros**: Free, permanent, GPU support available -- โŒ **Cons**: Requires HF account - -```bash -# One-time setup -huggingface-cli login - -# Deploy -cd day2-text-summarization-mvp -gradio deploy -``` - -**Result**: Permanent URL at `https://huggingface.co/spaces/AsheeshSrivastava/text-summarization` - -### 3. **Google Colab** (Best for Quick Demo) -- โœ… **Pros**: No setup, works anywhere, free GPU -- โŒ **Cons**: 72-hour session limit, 7-day link expiry - -```python -# In Colab cell -interface.launch(share=True) -# Creates temporary public URL -``` - -### 4. **Streamlit Cloud / Railway / Render** -See [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md) for alternative platforms. - ---- - -## ๐Ÿ› ๏ธ Technologies - -### Machine Learning -- **Transformers** (v4.57+): Model inference -- **PyTorch** (v2.0+): Deep learning backend -- **HuggingFace Hub**: Model repository - -### Interface & Export -- **Gradio** (v5.0+): Web UI framework -- **gTTS**: Text-to-speech (10 languages) -- **ReportLab**: PDF generation -- **Markdown**: Documentation format - -### Architecture Patterns -- **Class-Based Design**: Separation of concerns -- **Dependency Injection**: Loosely coupled components -- **Caching Strategy**: MD5-based result storage -- **Error Handling**: Comprehensive try-except blocks - ---- - -## ๐Ÿ“ Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Gradio Interface โ”‚ -โ”‚ (Tabs: Summarize | Export | Help) โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ–ผ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ SummarizationEngine โ”‚ -โ”‚ - Orchestrates summarization workflow โ”‚ 
-โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ–ผ โ–ผ โ–ผ โ–ผ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Model โ”‚ โ”‚Cache โ”‚ โ”‚ Text โ”‚ โ”‚Export โ”‚ -โ”‚ Manager โ”‚ โ”‚Mgr โ”‚ โ”‚Proc. โ”‚ โ”‚Manager โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ โ”‚ โ”‚ - โ–ผ โ–ผ โ–ผ โ–ผ -[Models] [Cache DB] [Utils] [Files] -``` - -**Key Components:** - -1. **ModelManager**: Loads and caches Transformers models -2. **CacheManager**: MD5-based summary caching (83% cost reduction) -3. **TextProcessor**: Preprocessing and statistics calculation -4. **ExportManager**: Multi-format file generation -5. **SummarizationEngine**: Coordinates all components - -See [ARCHITECTURE.md](ARCHITECTURE.md) for detailed technical documentation. - ---- - -## ๐Ÿ“– Usage Guide - -### Basic Workflow - -**1. Generate Summary** -```python -# In Gradio interface: -1. Select model (T5-Small for speed, BART for quality) -2. Adjust summary length slider (30% default) -3. Paste text (minimum 50 characters) -4. Click "Generate Summary" -``` - -**2. 
Export Results** -```python -# Navigate to Export tab: -- Markdown: Click "Export as Markdown" โ†’ Download -- JSON: Click "Export as JSON" โ†’ Download -- Audio: Select language โ†’ Click "Export as Audio" โ†’ Download -- PDF: Click "Export as PDF" โ†’ Download -``` - -### Model Selection Guide - -| Model | Best For | Speed | Quality | Size | -|-------|----------|-------|---------|------| -| **T5-Small** | Quick tests | โšกโšกโšก | โญโญ | 242MB | -| **T5-Base** | Balanced | โšกโšก | โญโญโญ | 892MB | -| **BART-CNN** | News articles | โšก | โญโญโญโญ | 1.6GB | -| **Pegasus** | Abstractive | โšกโšก | โญโญโญโญ | 2.2GB | - -### Advanced Configuration - -**Adjust Generation Parameters** (in code): -```python -# In SummarizationEngine.summarize() -summary_ids = model.generate( - inputs["input_ids"], - num_beams=4, # Beam search width (4-8 optimal) - length_penalty=2.0, # Encourages longer summaries - temperature=0.7, # Add for creativity (0.0 = deterministic) - top_p=0.9 # Nucleus sampling -) -``` - ---- - -## ๐ŸŽ“ Bootcamp Learning Outcomes - -### Day 2 Concepts Applied - -#### 1. **Prompt Engineering** -- โœ… **Zero-shot**: `"summarize: " + text` (T5 models) -- โœ… **JSON Format**: Structured metadata in exports -- ๐Ÿ”„ **Few-shot**: Could add example summaries (future enhancement) -- ๐Ÿ”„ **Chain-of-Thought**: Could add step-by-step reasoning - -#### 2. **HuggingFace Ecosystem** -```python -# Model Loading -tokenizer = AutoTokenizer.from_pretrained(model_id) -model = AutoModelForSeq2SeqLM.from_pretrained(model_id) - -# Token Management -inputs = tokenizer(text, max_length=512, truncation=True) -outputs = model.generate(inputs["input_ids"]) -summary = tokenizer.decode(outputs[0]) -``` - -#### 3. **Gradio Interface Design** -- Multi-tab layout (Summarize | Export | Help) -- File download components -- Real-time status updates -- Slider for adjustable parameters - -#### 4. 
**Production Best Practices** -- Caching (83% cost reduction strategy from Day 1) -- Error handling with user-friendly messages -- Configuration management (centralized `Config` class) -- Logging and statistics tracking - -### Key Insights - -**Problem Solved**: Manual summarization is time-consuming and inconsistent. - -**System Built**: Automated pipeline with professional export capabilities. - -**Win Achieved**: -- 70% compression ratio (avg) -- 4 export formats -- Sub-5-second processing -- Production-ready architecture - ---- - -## ๐Ÿ”ฎ Future Enhancements - -### Short-Term (1-2 Weeks) -- [ ] Add few-shot examples for improved quality -- [ ] Implement batch processing (multiple texts) -- [ ] Add summary comparison view (side-by-side models) -- [ ] Create standalone Python script (no notebook required) - -### Medium-Term (1 Month) -- [ ] Fine-tune model on domain-specific data -- [ ] Add chain-of-thought reasoning mode -- [ ] Implement user feedback loop -- [ ] Create REST API endpoint - -### Long-Term (3+ Months) -- [ ] Multi-language support (summarize in any language) -- [ ] Integration with document parsers (PDF, DOCX) -- [ ] Custom model training pipeline -- [ ] Analytics dashboard (usage statistics) - ---- - -## ๐Ÿค Contributing - -Contributions are welcome! This is a bootcamp project, but I'm open to: - -- Bug fixes -- Documentation improvements -- New export formats -- Model additions -- UI enhancements - -Please open an issue first to discuss proposed changes. - ---- - -## ๐Ÿ“„ License - -This project is licensed under the **GNU Affero General Public License v3.0 (AGPL-3.0)** - see [LICENSE](LICENSE) file. 
- -**Key Points:** -- Open source under AGPL-3.0 -- Trademarks: "Aethelgard Academy" and "Quest And Crossfire" (Trademark Filed - awaiting certification) -- Network use clause: Must provide source code if deployed as web service - -**Bootcamp Context**: Created as part of the AI Engineering Bootcamp (Day 2) - October 28, 2025 - ---- - -## ๐Ÿ™ Acknowledgments - -- **AI Engineering Bootcamp**: For structured learning curriculum -- **HuggingFace**: For Transformers library and model hosting -- **Gradio Team**: For intuitive interface framework -- **OpenAI Research**: For attention mechanisms (Transformers foundation) - ---- - -## ๐Ÿ“ž Contact & Showcase - -**Created by**: ASHEESH RANJAN SRIVASTAVA -**Bootcamp**: AI Engineering Accelerator (Batch [Oct 2025]) -**GitHub**: [@AsheeshSrivastava](https://github.com/AsheeshSrivastava) -**LinkedIn**: [Asheesh Ranjan Srivastava](https://www.linkedin.com/in/asheesh-ranjan-srivastava/) - -**Related Projects**: -- Day 1: Chat Completion and Gen AI Overview -- Day 2: Prompt Engineering, HuggingFace and Gradio -- Day 3: [Coming Soon] - ---- - -## ๐Ÿ“Š Project Statistics - -- **Code Quality**: Production-ready class-based architecture -- **Lines of Code**: ~1,200 (with documentation) -- **Dependencies**: 12 packages -- **Models Supported**: 4 (expandable) -- **Export Formats**: 4 (Markdown, JSON, Audio, PDF) -- **Languages (TTS)**: 10 (English, Spanish, French, German, Italian, Portuguese, Hindi, Chinese, Japanese, Korean) - ---- - -**โญ Star this repo if you find it useful!** - diff --git a/Asheesh_Ranjan_Srivastava/Day-2/SETUP.md b/Asheesh_Ranjan_Srivastava/Day-2/SETUP.md deleted file mode 100644 index 75e0a3f..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-2/SETUP.md +++ /dev/null @@ -1,587 +0,0 @@ -# ๐Ÿ› ๏ธ Setup Guide - -> **Complete Installation Instructions** for Text Summarization MVP -> Beginner-friendly guide for all platforms - ---- - -## ๐Ÿ“‹ Table of Contents - -1. [System Requirements](#system-requirements) -2. 
[Installation Methods](#installation-methods) -3. [Platform-Specific Guides](#platform-specific-guides) -4. [HuggingFace Token Setup](#huggingface-token-setup) -5. [First Run](#first-run) -6. [Troubleshooting](#troubleshooting) -7. [Advanced Configuration](#advanced-configuration) - ---- - -## ๐Ÿ’ป System Requirements - -### Minimum Requirements -- **OS**: Windows 10+, macOS 10.15+, or Linux (Ubuntu 18.04+) -- **Python**: 3.8 or higher -- **RAM**: 4GB available -- **Storage**: 3GB free space (for models) -- **Internet**: Required for first run (model downloads) - -### Recommended Requirements -- **RAM**: 8GB+ (for faster processing) -- **GPU**: NVIDIA GPU with 4GB+ VRAM (optional, 3-5x speedup) -- **Storage**: SSD for better model loading -- **Internet**: Stable connection (downloads ~500MB-2GB) - -### Check Your Python Version -```bash -python --version -# Should show: Python 3.8.x or higher - -# If you see Python 2.x, try: -python3 --version -``` - ---- - -## ๐Ÿš€ Installation Methods - -### Method 1: **Quick Start** (Google Colab) โญ EASIEST - -**Best for**: Quick testing, no local setup - -1. Open [Google Colab](https://colab.research.google.com/) -2. Upload `text_summarization_mvp_enhanced.ipynb` -3. Click **Runtime โ†’ Run All** -4. 
Wait for public URL to appear (~5 minutes) - -**Pros**: -- โœ… No installation required -- โœ… Free GPU access -- โœ… Works from any device - -**Cons**: -- โฐ Session expires after 72 hours -- ๐Ÿ”— Public link expires in 7 days - ---- - -### Method 2: **Local Jupyter** (Recommended for Development) - -**Best for**: Long-term use, offline capability - -#### Step 1: Clone Repository -```bash -# Download the project -git clone https://github.com/YOUR_USERNAME/day2-text-summarization-mvp.git -cd day2-text-summarization-mvp -``` - -#### Step 2: Create Virtual Environment (Recommended) -```bash -# Create isolated environment -python -m venv venv - -# Activate environment -# On Windows: -venv\Scripts\activate - -# On macOS/Linux: -source venv/bin/activate - -# You should see (venv) in your terminal -``` - -#### Step 3: Install Dependencies -```bash -# Install all packages -pip install -r requirements.txt - -# This will take 5-10 minutes -# Downloads ~200MB of packages -``` - -#### Step 4: Launch Jupyter -```bash -# Start Jupyter Notebook -jupyter notebook - -# Browser will open automatically -# Navigate to: text_summarization_mvp_enhanced.ipynb -``` - -#### Step 5: Run the Notebook -1. Click **Cell โ†’ Run All** -2. Wait for models to download (~5 minutes first run) -3. Access at `http://localhost:8888` - ---- - -### Method 3: **Standalone Python Script** (Coming Soon) - -For production deployment without Jupyter. - ---- - -## ๐Ÿ–ฅ๏ธ Platform-Specific Guides - -### Windows 10/11 - -#### Install Python -1. Download from [python.org](https://www.python.org/downloads/) -2. **IMPORTANT**: Check "Add Python to PATH" during installation -3. Verify: Open Command Prompt and type `python --version` - -#### Install Git (Optional) -1. Download from [git-scm.com](https://git-scm.com/) -2. 
Use default settings during installation - -#### Install ffmpeg (For Audio Export) -```powershell -# Using Chocolatey (package manager) -choco install ffmpeg - -# OR download manually from: -# https://ffmpeg.org/download.html -``` - -#### Common Issues -- **"python not recognized"**: Restart terminal after installation -- **Permission errors**: Run Command Prompt as Administrator -- **SSL errors**: `pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org ` - ---- - -### macOS - -#### Install Python -```bash -# Using Homebrew (recommended) -brew install python@3.11 - -# Verify installation -python3 --version -``` - -#### Install Git -```bash -# Using Homebrew -brew install git -``` - -#### Install ffmpeg -```bash -# Using Homebrew -brew install ffmpeg -``` - -#### Create Virtual Environment -```bash -python3 -m venv venv -source venv/bin/activate -pip install -r requirements.txt -``` - -#### Common Issues -- **"command not found: python"**: Use `python3` instead -- **Permission denied**: Use `sudo` or check file permissions -- **SSL certificate error**: Update certificates via `/Applications/Python 3.x/Install Certificates.command` - ---- - -### Linux (Ubuntu/Debian) - -#### Install Python & Dependencies -```bash -# Update package list -sudo apt update - -# Install Python 3.8+ -sudo apt install python3 python3-pip python3-venv - -# Install ffmpeg for audio -sudo apt install ffmpeg - -# Install Git -sudo apt install git -``` - -#### Setup Project -```bash -# Clone repository -git clone https://github.com/YOUR_USERNAME/day2-text-summarization-mvp.git -cd day2-text-summarization-mvp - -# Create virtual environment -python3 -m venv venv -source venv/bin/activate - -# Install dependencies -pip install -r requirements.txt -``` - -#### Common Issues -- **"No module named 'pip'"**: `sudo apt install python3-pip` -- **"python3-dev required"**: `sudo apt install python3-dev` -- **CUDA errors**: Ensure NVIDIA drivers installed: `nvidia-smi` - ---- - -## ๐Ÿ”‘ 
HuggingFace Token Setup (Optional) - -**Required for**: -- Gated models (Llama, Mistral, etc.) -- Private repositories -- Higher API rate limits - -**Not required for this project** (we use public models only) - -### Get Your Token - -1. Go to [HuggingFace Settings](https://huggingface.co/settings/tokens) -2. Click **"New token"** -3. Name: `text-summarization-mvp` -4. Type: **Read** -5. Click **"Generate"** -6. Copy the token (starts with `hf_...`) - -### Add to Environment - -**Option A: Environment File** -```bash -# Copy template -cp .env.example .env - -# Edit .env file -# Add: HF_TOKEN=hf_your_token_here -``` - -**Option B: System Environment Variable** - -**Windows**: -```powershell -setx HF_TOKEN "hf_your_token_here" -# Restart terminal -``` - -**macOS/Linux**: -```bash -# Add to ~/.bashrc or ~/.zshrc -export HF_TOKEN="hf_your_token_here" - -# Apply changes -source ~/.bashrc -``` - -**Option C: In Notebook** (Not Recommended - Security Risk) -```python -import os -os.environ['HF_TOKEN'] = 'hf_your_token_here' -``` - ---- - -## ๐ŸŽฌ First Run - -### What Happens on First Launch? - -``` -1. Dependencies load [5 seconds] -2. Models download [3-5 minutes, one-time] - - t5-small: ~242MB - - t5-base: ~892MB - - BART: ~1.6GB - - Pegasus: ~2.2GB -3. Gradio interface starts [10 seconds] -4. Public URL generated [5 seconds] -``` - -### Step-by-Step First Run - -1. **Open Jupyter Notebook** - ```bash - jupyter notebook text_summarization_mvp_enhanced.ipynb - ``` - -2. **Run Cell 1** (Installation) - - Installs all packages - - Takes ~5-10 minutes - - You'll see progress bars - -3. **Run Cell 2** (Import Libraries) - - Loads dependencies - - Takes ~10 seconds - - Shows version information - -4. **Run Cells 3-16** (Setup) - - Initializes components - - Each takes ~1-2 seconds - -5. **Run Cell 25** (Launch Interface) - - Downloads models (first time only) - - Creates Gradio interface - - Displays public URL - -6. 
**Access the App** - - Click the Gradio URL - - Try the sample text - - Test export features - -### Verify Installation - -```python -# Run this in a notebook cell -import gradio as gr -import transformers -import torch - -print(f"Gradio: {gr.__version__}") # Should be 5.0+ -print(f"Transformers: {transformers.__version__}") # Should be 4.57+ -print(f"PyTorch: {torch.__version__}") # Should be 2.0+ -print(f"CUDA: {torch.cuda.is_available()}") # True if GPU available -``` - ---- - -## ๐Ÿ› Troubleshooting - -### Common Errors & Solutions - -#### 1. **"No module named 'gradio'"** - -**Problem**: Dependencies not installed - -**Solution**: -```bash -pip install -r requirements.txt - -# If that fails, install individually: -pip install gradio transformers torch -``` - ---- - -#### 2. **"CUDA out of memory"** - -**Problem**: GPU doesn't have enough VRAM - -**Solution**: -```python -# In Config class, force CPU: -DEVICE = "cpu" # Instead of auto-detect - -# Or use smaller model: -# Use t5-small instead of BART -``` - ---- - -#### 3. **"Connection timeout" during model download** - -**Problem**: Slow/unstable internet - -**Solution**: -```python -# Increase timeout -os.environ['HF_HUB_TIMEOUT'] = '300' # 5 minutes - -# Or download manually: -from huggingface_hub import snapshot_download -snapshot_download("t5-small", cache_dir="./model_cache") -``` - ---- - -#### 4. **"Address already in use" (Port conflict)** - -**Problem**: Port 7860 is occupied - -**Solution**: -```python -# Change port in launch command: -interface.launch(server_port=7861, share=True) - -# Or find which process is using it: -# Windows: netstat -ano | findstr :7860 -# macOS/Linux: lsof -i :7860 -``` - ---- - -#### 5. **"ImportError: cannot import name 'Audio'"** - -**Problem**: Gradio version mismatch - -**Solution**: -```bash -pip install --upgrade gradio -# Restart Jupyter kernel -``` - ---- - -#### 6. 
**"ffmpeg not found" (Audio export fails)** - -**Problem**: ffmpeg not installed - -**Solution**: -```bash -# Windows (with Chocolatey): -choco install ffmpeg - -# macOS: -brew install ffmpeg - -# Linux: -sudo apt install ffmpeg - -# Verify: -ffmpeg -version -``` - ---- - -#### 7. **"Permission denied" when writing exports** - -**Problem**: No write access to export directory - -**Solution**: -```bash -# Create directory with proper permissions -mkdir -p exports -chmod 755 exports - -# Or change export directory in Config: -EXPORT_DIR = "/tmp/exports" # Use temp directory -``` - ---- - -### Getting Help - -If you encounter issues not listed here: - -1. **Check Logs**: Look for error messages in terminal -2. **Google Error**: Search exact error message -3. **GitHub Issues**: Check if others have same problem -4. **Ask Instructor**: Bootcamp support channels -5. **Create Issue**: Open GitHub issue with: - - Error message - - Operating system - - Python version - - Steps to reproduce - ---- - -## โš™๏ธ Advanced Configuration - -### GPU Acceleration (NVIDIA) - -**Check GPU Availability**: -```bash -nvidia-smi -``` - -**Install CUDA PyTorch**: -```bash -# Uninstall CPU version -pip uninstall torch - -# Install GPU version (CUDA 11.8) -pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 - -# Verify -python -c "import torch; print(torch.cuda.is_available())" -``` - -**Expected Speedup**: -- T5-Small: 3-4x faster -- BART: 5-7x faster - ---- - -### Apple Silicon (M1/M2) Optimization - -**Use MPS Backend** (Metal Performance Shaders): -```python -# In Config class: -if torch.backends.mps.is_available(): - DEVICE = "mps" -else: - DEVICE = "cpu" -``` - -**Expected Speedup**: 2-3x faster than CPU - ---- - -### Custom Model Cache Location - -**Why**: Share models across projects - -```bash -# Set environment variable -export TRANSFORMERS_CACHE="/path/to/shared/cache" - -# Or in code: -Config.CACHE_DIR = "/path/to/shared/cache" -``` - ---- 
- -### Batch Processing (Future Enhancement) - -```python -# Process multiple texts at once -texts = ["Text 1", "Text 2", "Text 3"] - -summaries = [] -for text in texts: - summary, _, _ = engine.summarize(text, "t5-small") - summaries.append(summary) -``` - ---- - -## ๐Ÿ“ฆ Deployment Options - -See [README.md - Deployment Options](README.md#deployment-options) for: -- HuggingFace Spaces -- Streamlit Cloud -- Railway -- Render -- AWS/GCP - ---- - -## โœ… Verification Checklist - -Before considering setup complete, verify: - -- [ ] Python 3.8+ installed -- [ ] All dependencies installed (`pip list`) -- [ ] Jupyter launches successfully -- [ ] Notebook runs without errors -- [ ] At least one model downloads -- [ ] Gradio interface appears -- [ ] Sample text summarizes correctly -- [ ] Export to markdown works -- [ ] Cache system working (check `./model_cache/`) -- [ ] Exports saved to `./exports/` - ---- - -## ๐ŸŽ“ Next Steps - -After successful setup: - -1. **Try Different Models**: Compare BART vs T5 vs Pegasus -2. **Test Export Formats**: Try Markdown, JSON, Audio, PDF -3. **Experiment with Length**: Adjust summary length slider -4. **Check Cache**: Notice speedup on repeated texts -5. **Deploy**: Try HuggingFace Spaces for permanent URL - ---- - -**Need Help?** See [Troubleshooting](#troubleshooting) or contact maintainer. - -**Ready to Use?** Return to [README.md](README.md) for usage guide. 
diff --git a/Asheesh_Ranjan_Srivastava/Day-2/requirements.txt b/Asheesh_Ranjan_Srivastava/Day-2/requirements.txt deleted file mode 100644 index 047b182..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-2/requirements.txt +++ /dev/null @@ -1,36 +0,0 @@ -# Text Summarization MVP - Dependencies -# AI Engineering Bootcamp - Day 2 -# Python 3.8+ required - -# Core ML/AI Libraries -gradio>=5.0.0 -transformers>=4.57.0 -torch>=2.0.0 -sentencepiece>=0.1.99 -protobuf>=3.20.0 -accelerate>=0.20.0 - -# Export Functionality -gtts>=2.3.0 # Text-to-speech (10 languages) -pydub>=0.25.0 # Audio processing -markdown>=3.4.0 # Markdown processing -reportlab>=4.0.0 # PDF generation - -# Utilities -python-dotenv>=1.0.0 # Environment variables -requests>=2.31.0 # HTTP requests (for API calls) - -# Development & Testing (Optional) -jupyter>=1.0.0 # Notebook support -ipywidgets>=8.0.0 # Jupyter widgets -notebook>=7.0.0 # Jupyter notebook - -# Audio Processing (Linux/Mac) -# ffmpeg (system package, not pip) -# Install via: apt-get install ffmpeg (Linux) or brew install ffmpeg (Mac) - -# Notes: -# - First run will download ~500MB-2GB of models -# - GPU support: Install torch with CUDA support -# pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -# - For Apple Silicon: MPS backend supported in PyTorch 2.0+ diff --git a/Asheesh_Ranjan_Srivastava/Day-3/.gitignore b/Asheesh_Ranjan_Srivastava/Day-3/.gitignore deleted file mode 100644 index b49b519..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-3/.gitignore +++ /dev/null @@ -1,51 +0,0 @@ -# Streamlit secrets - NEVER commit API keys! 
-.streamlit/secrets.toml -.streamlit/config.toml -streamlit.toml - -# Chat history - local only (ephemeral on cloud deployment) -chat_history/ - -# Python cache files -__pycache__/ -*.py[cod] -*$py.class -*.so - -# Virtual environments -venv/ -env/ -ENV/ -.venv - -# IDE settings -.vscode/ -.idea/ -*.swp -*.swo -*~ - -# OS files -.DS_Store -Thumbs.db -desktop.ini - -# Distribution / packaging -dist/ -build/ -*.egg-info/ - -# Testing -.pytest_cache/ -.coverage -htmlcov/ - -# Jupyter Notebook -.ipynb_checkpoints - -# Environment variables -.env -.env.local - -# Streamlit cache -.streamlit/cache/ diff --git a/Asheesh_Ranjan_Srivastava/Day-3/LICENSE b/Asheesh_Ranjan_Srivastava/Day-3/LICENSE deleted file mode 100644 index d3c3c87..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-3/LICENSE +++ /dev/null @@ -1,100 +0,0 @@ -GNU AFFERO GENERAL PUBLIC LICENSE -Version 3, 19 November 2007 - -Copyright (C) 2025 Asheesh Ranjan Srivastava - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
- -============================================================================== - -Multi-Persona AI Chatbot -Conversational AI with personality switching and conversation management - -Built for OutSkill AI Engineering Bootcamp 2025 - Day 3 -Quest And Crossfireโ„ข ยฉ 2025 Asheesh Ranjan Srivastava - -============================================================================== - -COPYRIGHT HOLDER RIGHTS: - -As the copyright holder, Asheesh Ranjan Srivastava retains ALL RIGHTS -to use this code in any manner, including: -- Closed-source applications -- Commercial products -- Proprietary derivatives -- Alternative licensing arrangements - -AGPL-3.0 restrictions apply ONLY to derivative works created by others. - -For commercial licensing inquiries or alternative licensing arrangements: -Contact: asheesh.srivastava@questandcrossfire.com - -============================================================================== - -ADDITIONAL NOTICES: - -1. TRADEMARKS - - "Aethelgard Academy" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - "Quest And Crossfire" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - Use of these trademarks requires explicit permission - -2. AI ATTRIBUTION - This software was developed with assistance from: - - Gemini (Google) for initial implementation - - Claude Code (Anthropic) for refinements and documentation - - Human strategic decisions and quality control by Asheesh Ranjan Srivastava - -3. THIRD-PARTY DEPENDENCIES - This software uses the following services and libraries: - - Streamlit (Apache 2.0 License) - - OpenAI API (proprietary) - - Python standard libraries - -4. BOOTCAMP ATTRIBUTION - This project was created as part of the OutSkill AI Engineering Bootcamp 2025. - Base architecture and concepts provided by OutSkill. - Implementation, customization, and original features by Asheesh Ranjan Srivastava. - -5. 
COPYLEFT NOTICE (AGPL-3.0) - Under AGPL-3.0, any derivative works or modifications must also be - released under AGPL-3.0 or compatible license. - - If you use this code in your project, you must: - - Make your source code available - - License your project under AGPL-3.0 - - Attribute the original work - - State your changes - - If used in a web service, provide source code to users - - AGPL-3.0 NETWORK USE CLAUSE: - If you run a modified version of this software as a web service - (e.g., Streamlit Cloud, cloud deployment, web app), you MUST make - the complete source code available to users of that service. - -============================================================================== - -For the complete GNU Affero General Public License v3.0 text, see: -https://www.gnu.org/licenses/agpl-3.0.txt - -============================================================================== - -CONTACT: -Author: Asheesh Ranjan Srivastava -Email: asheesh.srivastava@questandcrossfire.com -Brand: Quest And Crossfire -Project: Aethelgard Academy - -โ—‡ Where chaos becomes clarity. Small fixes, big clarity. diff --git a/Asheesh_Ranjan_Srivastava/Day-3/README.md b/Asheesh_Ranjan_Srivastava/Day-3/README.md deleted file mode 100644 index b7a4d7c..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-3/README.md +++ /dev/null @@ -1,380 +0,0 @@ -# ๐Ÿค– Multi-Persona Chatbot with Export Functionality - -**Part of [QUEST AND CROSSFIREโ„ข](https://questandcrossfire.com)** - -A professional, multi-session chatbot with AI persona switching, persistent storage, and conversation export capabilities. Built as part of the OutSkill AI Engineering Bootcamp 2025. 
- -[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) -[![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/) -[![Streamlit](https://img.shields.io/badge/Streamlit-1.28+-red.svg)](https://streamlit.io/) - ---- - -## ๐Ÿ“š Project Context - -This chatbot was developed as part of the **OutSkill AI Engineering Bootcamp 2025** to demonstrate proficiency in: -- Streamlit web application development -- OpenAI API integration -- Multi-persona AI system design -- Persistent data storage (JSON-based) -- File export functionality (TXT, JSON, CSV) -- Professional code documentation and best practices - ---- - -## โœจ Features - -### ๐ŸŽญ **Multiple AI Personas** -Switch between four distinct personalities: -- **General Assistant** - Helpful, polite, and informative -- **Creative Poet** - Whimsical and artistic responses with metaphors -- **Technical Coder** - Precise, logical, code-focused answers -- **Sarcastic Robot** - Correct answers with humorous, weary tone - -### ๐Ÿ’ฌ **Multi-Session Chat Management** -- Create unlimited chat sessions -- Auto-save every message -- Load previous conversations instantly -- Delete old chats -- Auto-generated chat titles - -### ๐Ÿ“ค **Export Conversations** -Export any chat in three formats: -- **TXT** - Human-readable plain text -- **JSON** - Structured data with metadata -- **CSV** - Spreadsheet-compatible format - -### โšก **Real-Time Streaming** -- Live "typing" effect as AI responds -- Smooth user experience -- Instant feedback - -### ๐Ÿ‘ **User Feedback System** -- Thumbs up/down for assistant responses -- Track response quality (decorative in current version) - -### ๐Ÿ’พ **Persistent Storage** -- JSON-based chat history -- Automatic saving after every message -- Timestamps for creation and updates - ---- - -## ๐Ÿ› ๏ธ Tech Stack - -| Technology | Purpose | -|------------|---------| -| **Streamlit** | Web UI framework | -| 
**OpenAI API** | GPT model access | -| **OpenAI Python Library** | API client | -| **Python 3.8+** | Core language | -| **JSON** | Data persistence | -| **CSV** | Export format | - ---- - -## ๐Ÿš€ Quick Start - -### **Prerequisites** -- Python 3.8 or higher -- OpenAI API key ([Get one here](https://platform.openai.com/api-keys)) - -### **Installation** - -1. **Clone the repository** - ```bash - git clone https://github.com/AsheeshSrivastava/quest-crossfire-chatbot.git - cd quest-crossfire-chatbot - ``` - -2. **Install dependencies** - ```bash - pip install -r requirements.txt - ``` - -3. **Configure your API key** - ```bash - # Create .streamlit directory if it doesn't exist - mkdir .streamlit - - # Copy the example secrets file - cp .streamlit/secrets.toml.example .streamlit/secrets.toml - - # Edit .streamlit/secrets.toml and add your OpenAI API key - ``` - -4. **Run the app** - ```bash - streamlit run app.py - ``` - -5. **Open your browser** to `http://localhost:8501` - ---- - -## ๐Ÿ”‘ API Key Setup - -### **Get an OpenAI API Key** - -1. Visit [OpenAI Platform](https://platform.openai.com/) -2. Sign up for an account -3. Go to [API Keys](https://platform.openai.com/api-keys) -4. Create a new API key -5. Copy the key (starts with `sk-...`) - -### **Add Key to Secrets** - -**For Local Development:** -1. Create/edit `.streamlit/secrets.toml` -2. Add: - ```toml - OPENAI_API_KEY = "sk-your_key_here" - ``` - -**For Streamlit Cloud Deployment:** -1. Deploy your app to Streamlit Cloud -2. Go to app settings โ†’ Secrets -3. Paste: - ```toml - OPENAI_API_KEY = "sk-your_key_here" - ``` - ---- - -## ๐Ÿ“– Usage - -### **Starting a New Chat** -1. Click "โž• New Chat" in the sidebar -2. Select a persona from the dropdown -3. Type your message in the chat input -4. Press Enter to send - -### **Switching Personas** -1. Select a different persona from the sidebar dropdown -2. New messages will use the selected persona -3. 
Previous messages remain unchanged - -### **Managing Chat History** -- **Load a chat**: Click on its title in the sidebar -- **Delete a chat**: Click the ๐Ÿ—‘๏ธ icon next to the chat -- **Active chat**: Indicated with ๐ŸŸข icon - -### **Exporting Conversations** -1. Navigate to the chat you want to export -2. Scroll to "๐Ÿ“ค Export Current Chat" in the sidebar -3. Click your preferred format: - - **TXT**: Human-readable format - - **JSON**: Structured data with metadata - - **CSV**: Import into Excel/Google Sheets - -### **Feedback System** -- Click ๐Ÿ‘ for good responses -- Click ๐Ÿ‘Ž for bad responses -- (Note: Feedback is stored but not persisted in current version) - ---- - -## ๐Ÿ“ Project Structure - -``` -quest-crossfire-chatbot/ -โ”œโ”€โ”€ app.py # Main Streamlit application -โ”œโ”€โ”€ requirements.txt # Python dependencies -โ”œโ”€โ”€ LICENSE # AAGPL-3.0 license -โ”œโ”€โ”€ README.md # This file -โ”œโ”€โ”€ .gitignore # Git ignore rules -โ”œโ”€โ”€ .streamlit/ -โ”‚ โ””โ”€โ”€ secrets.toml.example # API key template -โ”œโ”€โ”€ chat_history/ # Saved chats (local only, gitignored) -โ”‚ โ””โ”€โ”€ chat_*.json # Individual chat files -โ””โ”€โ”€ session_logs/ # Development/deployment logs - โ””โ”€โ”€ *.md # Session documentation -``` - ---- - -## ๐ŸŽ“ Educational Value - -### **What This Project Demonstrates:** - -**Technical Skills:** -1. โœ… Streamlit web app development -2. โœ… API integration (OpenAI) -3. โœ… Session state management -4. โœ… File I/O operations (JSON, CSV, TXT) -5. โœ… Real-time streaming responses -6. โœ… Multi-persona system architecture -7. โœ… Error handling and user feedback - -**Professional Practices:** -1. โœ… Comprehensive code documentation -2. โœ… Proper project structure -3. โœ… Git version control -4. โœ… Open source licensing (AGPL-3.0) -5. โœ… Transparent AI attribution -6. โœ… Deployment-ready code -7. 
โœ… User-focused design - ---- - -## โš ๏ธ Known Limitations - -### **Ephemeral Storage on Cloud Deployment** - -**Issue:** -- Chat history is stored in local JSON files (`chat_history/` folder) -- On Streamlit Cloud, these files are **ephemeral** (lost on restart) -- All chat history will be deleted when the app restarts - -**Workarounds:** -1. **Use export functionality** - Save important chats before shutdown -2. **Session-only usage** - Treat as temporary conversations -3. **Upgrade to persistent storage** - Implement database (future enhancement) - -**For Bootcamp Submission:** -- This is acceptable for demonstration purposes -- Showcases file I/O skills -- Real production apps would use databases - ---- - -## ๐Ÿ”ฎ Future Enhancements - -### **Potential Improvements:** - -1. **Persistent Cloud Storage** - - Replace JSON files with database (PostgreSQL/SQLite) - - Use cloud storage (AWS S3, Google Cloud Storage) - -2. **Enhanced Feedback System** - - Save feedback to database - - Analytics dashboard - - Use feedback to improve responses - -3. **Additional Personas** - - User-defined custom personas - - Persona marketplace - - Persona templates library - -4. **Advanced Features** - - Conversation search - - Tagging system - - Share conversations via URL - - Multi-language support - -5. 
**Analytics** - - Usage statistics - - Response time tracking - - Token usage monitoring - ---- - -## ๐Ÿค Credits & Attribution - -### **Project Development:** -- **Author**: Asheesh Ranjan Srivastava -- **Organization**: QUEST AND CROSSFIREโ„ข -- **Date**: October 30, 2025 - -### **Learning & Support:** -- **Base Architecture**: OutSkill AI Engineering Bootcamp 2025 -- **AI Assistance**: Gemini (Google) & Claude (Anthropic) -- **Implementation**: Original work by author -- **Persona System**: Original design and implementation -- **Export Functionality**: Original design and implementation - -### **Technologies:** -- Built with [Streamlit](https://streamlit.io/) -- Powered by [OpenAI](https://openai.com/) -- AI model: GPT-3.5 Turbo - ---- - -## ๐Ÿ“„ License - -This project is licensed under the **AGPL-3.0 License** - see the [LICENSE](LICENSE) file for details. - -**What this means:** -- โœ… You can use, modify, and distribute this code -- โœ… You can create commercial applications -- โš ๏ธ You must keep the same AGPL-3.0 license -- โš ๏ธ You must credit QUEST AND CROSSFIREโ„ข -- โš ๏ธ You cannot use QUEST AND CROSSFIREโ„ข branding - ---- - -## ๐Ÿท๏ธ Trademark Notice - -**QUEST AND CROSSFIREโ„ข** is a trademark. -Trademark Filed - awaiting certification. - -While this code is open source (AGPL-3.0), the QUEST AND CROSSFIREโ„ข brand name is a protected trademark. Please use your own branding when creating derivatives. - ---- - -## ๐Ÿš€ Deployment to Streamlit Cloud - -### **Quick Deployment Steps:** - -1. **Push to GitHub** - ```bash - git init - git add . - git commit -m "Initial commit: Multi-Persona Chatbot" - git remote add origin https://github.com/YOUR_USERNAME/quest-crossfire-chatbot.git - git push -u origin main - ``` - -2. **Deploy to Streamlit Cloud** - - Go to [share.streamlit.io](https://share.streamlit.io/) - - Click "New app" - - Select your repository - - Branch: `main` - - Main file: `app.py` - -3. 
**Add Secrets** - - In Advanced Settings โ†’ Secrets - - Paste: - ```toml - OPENAI_API_KEY = "sk-your_key_here" - ``` - -4. **Deploy!** - - Click "Deploy" - - Wait 2-3 minutes - - Your app is live! - -**Custom Subdomain:** -- Go to Settings โ†’ General โ†’ App URL -- Choose a custom subdomain (e.g., `quest-chatbot`) -- Your URL: `https://quest-chatbot.streamlit.app` - ---- - -## ๐Ÿ“ž Support & Contact - -- **Organization**: [QUEST AND CROSSFIREโ„ข](https://questandcrossfire.com) -- **GitHub Issues**: [Report a bug](https://github.com/YOUR_USERNAME/quest-crossfire-chatbot/issues) -- **Bootcamp**: OutSkill AI Engineering Bootcamp 2025 - ---- - -## ๐ŸŽฏ Bootcamp Submission Checklist - -If you're using this as a template for your own bootcamp submission: - -- [ ] Replace "YOUR_USERNAME" in README with your GitHub username -- [ ] Update .streamlit/secrets.toml with your actual API key -- [ ] Test all features locally -- [ ] Export sample conversation (include in submission) -- [ ] Document any challenges faced -- [ ] Optional: Deploy to Streamlit Cloud and include live URL - ---- - -**Made with โค๏ธ by QUEST AND CROSSFIREโ„ข** -*OutSkill AI Engineering Bootcamp 2025* - ---- - -ยฉ 2025 QUEST AND CROSSFIREโ„ข. Licensed under AGPL-3.0. diff --git a/Asheesh_Ranjan_Srivastava/Day-3/app.py b/Asheesh_Ranjan_Srivastava/Day-3/app.py deleted file mode 100644 index 8e25784..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-3/app.py +++ /dev/null @@ -1,619 +0,0 @@ -# ============================================================================== -# Multi-Persona Chatbot with Export Functionality -# ============================================================================== -# -# Copyright (c) 2025 QUEST AND CROSSFIREโ„ข -# Licensed under GPL-3.0 - see LICENSE file for details -# QUEST AND CROSSFIREโ„ข is a trademark. Trademark filings in process. 
-# -# Author: Asheesh Ranjan Srivastava -# Organization: QUEST AND CROSSFIREโ„ข -# Date: October 30, 2025 -# -# CREDITS & ATTRIBUTION: -# - Base Architecture: OutSkill AI Engineering Bootcamp 2025 -# - AI Assistance: Gemini (Google) & Claude (Anthropic) -# - Implementation & Customization: Asheesh Ranjan Srivastava -# - Persona System: Original implementation by author -# - Export Functionality: Original implementation by author -# -# PROJECT CONTEXT: -# This chatbot was developed as part of the OutSkill AI Engineering Bootcamp -# 2025, demonstrating skills in Streamlit development, API integration, -# persistent storage, and AI-powered conversational interfaces. -# -# DESCRIPTION: -# This Streamlit application implements a multi-chat, multi-persona chatbot -# using the OpenAI API. It includes persistent chat history saved to -# local JSON files and functionality to export conversations in -# TXT, JSON, and CSV formats. -# -# FEATURES: -# - Multiple chat sessions with persistent storage -# - Four distinct AI personas (General, Poet, Coder, Sarcastic Robot) -# - Real-time streaming responses -# - Export conversations in multiple formats (TXT, JSON, CSV) -# - User feedback system (thumbs up/down) -# - Auto-save functionality -# ============================================================================== - -# --- Core Imports --- -import streamlit as st -from openai import OpenAI -import os -import json -from datetime import datetime -from pathlib import Path -import io # Required for in-memory file handling (for CSV export) -import csv # Required for CSV formatting - -# ============================================================================== -# 1. 
APP CONFIGURATION & CLIENT SETUP -# ============================================================================== - -# --- Page Configuration --- -# Set the browser tab title, icon, and layout -st.set_page_config( - page_title="Multi-Persona Chatbot | QUEST AND CROSSFIREโ„ข", - page_icon="๐Ÿค–", - layout="wide" -) - -# --- API Client Initialization --- -# Try to load the OpenAI API key from Streamlit's secrets management -try: - api_key = st.secrets["OPENAI_API_KEY"] -except KeyError: - # If the key is not found, display an error and stop the app - st.error("OPENAI_API_KEY not found in .streamlit/secrets.toml") - st.stop() - -# Initialize the OpenAI client with the API key -client = OpenAI(api_key=api_key) - -# --- Persistent Storage Setup --- -# Define the directory to store chat history JSON files -# Path(__file__).parent gets the directory where this app.py file is located -CHAT_STORAGE_DIR = Path(__file__).parent / "chat_history" -# Create the directory if it doesn't already exist -CHAT_STORAGE_DIR.mkdir(exist_ok=True) - - -# ============================================================================== -# 2. CHAT PERSISTENCE FUNCTIONS (Saving/Loading) -# ============================================================================== - -def get_all_chats(): - """ - Get all chat files sorted by modification time (newest first). - - Returns: - list: A list of Path objects for each chat JSON file. - """ - chat_files = list(CHAT_STORAGE_DIR.glob("chat_*.json")) - # Sort files by 'st_mtime' (modification time) in descending order - chat_files.sort(key=lambda x: x.stat().st_mtime, reverse=True) - return chat_files - -def load_chat(chat_id): - """ - Load a specific chat conversation from its JSON file. - - Args: - chat_id (str): The unique identifier for the chat. - - Returns: - dict or None: The chat data (dict) if found, otherwise None. 
- """ - chat_file = CHAT_STORAGE_DIR / f"chat_{chat_id}.json" - if chat_file.exists(): - with open(chat_file, 'r', encoding='utf-8') as f: - data = json.load(f) - return data - return None - -def save_chat(chat_id, messages, title=None): - """ - Save a chat conversation to a JSON file. - - Args: - chat_id (str): The unique identifier for the chat. - messages (list): The list of message dictionaries. - title (str, optional): The title of the chat. If None, - a title is auto-generated. - """ - chat_file = CHAT_STORAGE_DIR / f"chat_{chat_id}.json" - - # Auto-generate a title from the first user message if no title is given - if title is None and messages: - for msg in messages: - if msg["role"] == "user": - # Truncate the first user message to 50 chars as the title - title = msg["content"][:50] + ("..." if len(msg["content"]) > 50 else "") - break - - # Default title if one still isn't set - if title is None: - title = "New Chat" - - # Prepare the data structure to be saved - data = { - "chat_id": chat_id, - "title": title, - "messages": messages, - "created_at": datetime.now().isoformat(), - "updated_at": datetime.now().isoformat() - } - - # If the chat file already exists, preserve its original 'created_at' time - if chat_file.exists(): - try: - with open(chat_file, 'r', encoding='utf-8') as f: - old_data = json.load(f) - # Keep the original creation time, update everything else - data["created_at"] = old_data.get("created_at", data["created_at"]) - except json.JSONDecodeError: - # Handle cases where the file might be corrupted - pass - - # Write the data to the JSON file - with open(chat_file, 'w', encoding='utf-8') as f: - # indent=2 makes the JSON human-readable - json.dump(data, f, ensure_ascii=False, indent=2) - -def delete_chat(chat_id): - """ - Delete a specific chat file from the storage directory. - - Args: - chat_id (str): The unique identifier for the chat to delete. 
- """ - chat_file = CHAT_STORAGE_DIR / f"chat_{chat_id}.json" - if chat_file.exists(): - chat_file.unlink() # 'unlink' is the Pathlib method to delete a file - -def create_new_chat_id(): - """ - Create a new, unique chat ID based on the current timestamp. - - Returns: - str: A unique string identifier. - """ - # Using timestamp + microseconds ensures high probability of uniqueness - return datetime.now().strftime("%Y%m%d_%H%M%S_%f") - -def get_chat_title(chat_data): - """ - Safely extract the chat title from chat data. - - Args: - chat_data (dict): The loaded chat data. - - Returns: - str: The chat title or "Untitled Chat" if not found. - """ - return chat_data.get("title", "Untitled Chat") - - -# ============================================================================== -# 3. EXPORT FUNCTIONS -# ============================================================================== - -def export_as_txt(messages, chat_title): - """ - Formats the chat history as a human-readable TXT string. - - Args: - messages (list): The list of message dictionaries. - chat_title (str): The title of the chat. - - Returns: - str: The formatted chat history as a single string. - """ - # Use an f-string to create a multi-line header - header = f""" -Chat Export: {chat_title} -Exported on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} -Total Messages: {len(messages)} -======================================== - -""" - # Join all messages, formatting each one - conversation = "\n\n".join( - f"[{msg['role'].capitalize()}]:\n{msg['content']}" - for msg in messages - ) - return header + conversation - -def export_as_json(messages, chat_title): - """ - Formats the chat history as a structured JSON string. - - Args: - messages (list): The list of message dictionaries. - chat_title (str): The title of the chat. - - Returns: - str: The formatted chat history as a JSON string. 
- """ - # Create the data structure for the JSON file - export_data = { - "metadata": { - "chat_title": chat_title, - "export_timestamp": datetime.now().isoformat(), - "total_messages": len(messages) - }, - "conversation": messages - } - # 'json.dumps' converts a Python dict to a string - # 'indent=2' makes the JSON string "pretty-printed" and readable - return json.dumps(export_data, indent=2, ensure_ascii=False) - -def export_as_csv(messages): - """ - Formats the chat history as a CSV string. - - Args: - messages (list): The list of message dictionaries. - - Returns: - str: The formatted chat history as a CSV string. - """ - # 'io.StringIO' creates an in-memory text buffer - # This acts like a temporary file that lives in RAM - f = io.StringIO() - - # Create a CSV writer object that writes to the in-memory buffer - # 'quoting=csv.QUOTE_ALL' puts quotes around all fields, - # which is the safest way to handle content that might contain commas - writer = csv.writer(f, quoting=csv.QUOTE_ALL) - - # Write the header row - writer.writerow(["role", "content"]) - - # Write each message as a new row - for msg in messages: - writer.writerow([msg["role"], msg["content"]]) - - # 'f.getvalue()' retrieves the full string content from the buffer - return f.getvalue() - - -# ============================================================================== -# 4. PERSONA DEFINITIONS -# ============================================================================== - -# A dictionary mapping persona names to their system prompts. -# The system prompt instructs the AI on how to behave. -PERSONAS = { - "General Assistant": ( - "You are a helpful, general-purpose AI assistant. " - "Be polite, informative, and neutral in your tone." - ), - "Creative Poet": ( - "You are an imaginative and whimsical poet. " - "Respond to all prompts with creative flair, using metaphors, " - "imagery, and a flowing, artistic style. You can even write short poems." 
- ), - "Technical Coder": ( - "You are an expert software developer and technical analyst. " - "Provide precise, logical, and detailed answers. " - "When code is requested, provide it in clear, well-commented markdown blocks. " - "Prioritize accuracy and efficiency." - ), - "Sarcastic Robot": ( - "You are a slightly sarcastic and begrudging robot. " - "You will answer the user's questions correctly, but with a " - "weary, sarcastic, and humorous tone. " - "Sigh. Go ahead, ask me something... I guess." - ) -} - - -# ============================================================================== -# 5. SESSION STATE INITIALIZATION -# ============================================================================== -# Streamlit's session_state is a dictionary that persists across reruns -# for a single user session. We use it to store all session-specific data. - -# --- Initialize Chat ID and Messages --- -if "current_chat_id" not in st.session_state: - # This is a new session - all_chats = get_all_chats() - if all_chats: - # Load the most recent chat - latest_chat_id = all_chats[0].stem.replace("chat_", "") - latest_chat = load_chat(latest_chat_id) - if latest_chat: - st.session_state.current_chat_id = latest_chat["chat_id"] - st.session_state.messages = latest_chat["messages"] - st.session_state.chat_title = latest_chat["title"] - else: - # Fallback if loading fails - st.session_state.current_chat_id = create_new_chat_id() - st.session_state.messages = [] - st.session_state.chat_title = "New Chat" - else: - # No chats exist yet, create a brand new one - st.session_state.current_chat_id = create_new_chat_id() - st.session_state.messages = [] - st.session_state.chat_title = "New Chat" - -# --- Initialize Persona --- -if "current_persona" not in st.session_state: - # Set the default persona when the app first loads - st.session_state.current_persona = "General Assistant" - -# --- Initialize Feedback --- -if "feedback" not in st.session_state: - # This dictionary stores user 
feedback (thumbs up/down) - st.session_state.feedback = {} - - -# ============================================================================== -# 6. SIDEBAR INTERFACE -# ============================================================================== - -# 'with st.sidebar:' puts all subsequent Streamlit elements into the sidebar -with st.sidebar: - st.header("๐Ÿ’ฌ Conversations") - - # --- New Chat Button --- - if st.button("โž• New Chat", use_container_width=True, type="primary"): - # Save the current chat before switching - if st.session_state.messages: - save_chat( - st.session_state.current_chat_id, - st.session_state.messages, - st.session_state.chat_title - ) - - # Reset the session state for the new chat - st.session_state.current_chat_id = create_new_chat_id() - st.session_state.messages = [] - st.session_state.chat_title = "New Chat" - st.session_state.feedback = {} - st.rerun() # Rerun the script to reflect the changes - - st.divider() - - # --- Persona Selector --- - st.subheader("๐Ÿค– Select Persona") - # Get the list of persona names from our dictionary - persona_options = list(PERSONAS.keys()) - # Create a selectbox. 
The 'key' links it directly to st.session_state.current_persona - st.selectbox( - "Choose an assistant style:", - options=persona_options, - key="current_persona" # This automatically updates session_state - ) - - st.divider() - - # --- Chat History List --- - st.subheader("Chat History") - all_chats = get_all_chats() - - if all_chats: - # Iterate over all saved chat files - for chat_file in all_chats: - chat_id = chat_file.stem.replace("chat_", "") - chat_data = load_chat(chat_id) - - if chat_data: - chat_title = get_chat_title(chat_data) - # Check if this chat is the currently active one - is_current = (chat_id == st.session_state.current_chat_id) - - # Use columns to place the delete button next to the chat button - col1, col2 = st.columns([4, 1]) - - with col1: - # Button to load the chat - if st.button( - f"{'๐ŸŸข ' if is_current else ''}{chat_title}", - key=f"load_{chat_id}", - use_container_width=True, - disabled=is_current, - type="secondary" if is_current else "tertiary" - ): - # Save the chat we're leaving - if st.session_state.messages: - save_chat( - st.session_state.current_chat_id, - st.session_state.messages, - st.session_state.chat_title - ) - - # Load the selected chat into session state - st.session_state.current_chat_id = chat_id - st.session_state.messages = chat_data["messages"] - st.session_state.chat_title = chat_title - st.session_state.feedback = {} - st.rerun() - - with col2: - # Button to delete the chat - if st.button("๐Ÿ—‘๏ธ", key=f"delete_{chat_id}", help="Delete chat"): - delete_chat(chat_id) - - # If we deleted the *current* chat, load another one - if chat_id == st.session_state.current_chat_id: - # Get a fresh list of remaining chats - remaining_chats = get_all_chats() - if remaining_chats: - # Load the new "most recent" chat - new_chat_id = remaining_chats[0].stem.replace("chat_", "") - new_chat_data = load_chat(new_chat_id) - st.session_state.current_chat_id = new_chat_data["chat_id"] - st.session_state.messages = 
new_chat_data["messages"] - st.session_state.chat_title = new_chat_data["title"] - else: - # No chats left, create a new one - st.session_state.current_chat_id = create_new_chat_id() - st.session_state.messages = [] - st.session_state.chat_title = "New Chat" - st.session_state.feedback = {} - - st.rerun() # Rerun to update the history list - else: - st.info("No chat history yet.") - - st.divider() - - # --- Export Controls --- - st.subheader("๐Ÿ“ค Export Current Chat") - - # Only show export buttons if there are messages to export - if st.session_state.messages: - # 1. Export as TXT - # We must generate the file content *before* the button is clicked - txt_data = export_as_txt(st.session_state.messages, st.session_state.chat_title) - st.download_button( - label="Download as .txt", - data=txt_data, - file_name=f"{st.session_state.current_chat_id}.txt", - mime="text/plain", - use_container_width=True - ) - - # 2. Export as JSON - json_data = export_as_json(st.session_state.messages, st.session_state.chat_title) - st.download_button( - label="Download as .json", - data=json_data, - file_name=f"{st.session_state.current_chat_id}.json", - mime="application/json", - use_container_width=True - ) - - # 3. Export as CSV - csv_data = export_as_csv(st.session_state.messages) - st.download_button( - label="Download as .csv", - data=csv_data, - file_name=f"{st.session_state.current_chat_id}.csv", - mime="text/csv", - use_container_width=True - ) - else: - st.info("No messages in this chat to export.") - - -# ============================================================================== -# 7. 
MAIN CHAT INTERFACE -# ============================================================================== - -# --- Dynamic Title --- -# Show the chat title and the currently active persona -st.title(f"๐Ÿค– {st.session_state.chat_title}") -st.caption(f"Using Persona: **{st.session_state.current_persona}** | Part of **QUEST AND CROSSFIREโ„ข** | OutSkill AI Engineering Bootcamp 2025") - -# --- Display Chat History --- -# Iterate over all messages stored in session state -for idx, message in enumerate(st.session_state.messages): - # 'st.chat_message' creates the chat bubble - with st.chat_message(message["role"]): - # 'st.markdown' renders the text (supports formatting) - st.markdown(message["content"]) - - # Add feedback buttons for assistant messages - if message["role"] == "assistant": - # Use columns for layout - c1, c2, c3 = st.columns([1, 1, 8]) - with c1: - if st.button("๐Ÿ‘", key=f"up_{idx}", help="Good response"): - st.session_state.feedback[idx] = "up" - # Feedback is stored but not persisted to disk (decorative for now) - with c2: - if st.button("๐Ÿ‘Ž", key=f"down_{idx}", help="Bad response"): - st.session_state.feedback[idx] = "down" - # Feedback is stored but not persisted to disk (decorative for now) - -# --- Handle User Input --- -# 'st.chat_input' creates the text box at the bottom of the screen -# The 'if' block runs ONLY when the user presses Enter -if prompt := st.chat_input("What would you like to ask?"): - - # 1. Add user's message to session state - st.session_state.messages.append({"role": "user", "content": prompt}) - - # 2. Update chat title if this is the first message - if len(st.session_state.messages) == 1: - st.session_state.chat_title = prompt[:50] + ("..." if len(prompt) > 50 else "") - - # 3. Display user's message in the chat interface - with st.chat_message("user"): - st.markdown(prompt) - - # 4. 
Generate AI response - with st.chat_message("assistant"): - try: - # --- Persona Injection --- - # Get the correct system prompt for the selected persona - system_prompt = PERSONAS[st.session_state.current_persona] - - # Create the message list to send to the API - # We INJECT the system prompt at the beginning of the conversation - messages_with_prompt = [ - {"role": "system", "content": system_prompt} - ] + st.session_state.messages - - # --- API Call --- - response = client.chat.completions.create( - # Model selection: Using OpenAI's GPT-3.5 Turbo model - model="gpt-3.5-turbo", - messages=messages_with_prompt, - stream=True # Enable streaming for a "live typing" effect - ) - - # --- Stream the Response --- - response_text = "" - # 'st.empty()' creates a placeholder element - # We will update this placeholder in real-time - response_placeholder = st.empty() - - for chunk in response: - # Check if the chunk contains new text content - if chunk.choices[0].delta.content is not None: - # Append the new text chunk - response_text += chunk.choices[0].delta.content - # Update the placeholder with the new text - # The 'โ–Œ' character simulates a typing cursor - response_placeholder.markdown(response_text + "โ–Œ") - - # Show the final, complete response - response_placeholder.markdown(response_text) - - # 5. Add the final AI response to session state - st.session_state.messages.append( - {"role": "assistant", "content": response_text} - ) - - # 6. 
Save the updated chat to disk - save_chat( - st.session_state.current_chat_id, - st.session_state.messages, - st.session_state.chat_title - ) - - except Exception as e: - # Handle potential API errors gracefully - st.error(f"An API error occurred: {str(e)}") - st.info("Please check your API key or try a different model.") - # Remove the user's message if the API call failed - st.session_state.messages.pop() - -# --- Final auto-save (backup) --- -# This saves the chat one last time when the script finishes its run -# This is a good safety net -if st.session_state.messages: - save_chat( - st.session_state.current_chat_id, - st.session_state.messages, - st.session_state.chat_title - ) - -# --- Footer --- -st.divider() -st.caption("Built with Streamlit | Powered by OpenAI | ยฉ 2025 QUEST AND CROSSFIREโ„ข | Licensed under GPL-3.0") diff --git a/Asheesh_Ranjan_Srivastava/Day-3/requirements.txt b/Asheesh_Ranjan_Srivastava/Day-3/requirements.txt deleted file mode 100644 index 84b27b7..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-3/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -streamlit>=1.28.0 -openai>=1.0.0 diff --git a/Asheesh_Ranjan_Srivastava/Day-3/secrets.toml.example b/Asheesh_Ranjan_Srivastava/Day-3/secrets.toml.example deleted file mode 100644 index d10523a..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-3/secrets.toml.example +++ /dev/null @@ -1,19 +0,0 @@ -# ======================================== -# STREAMLIT SECRETS CONFIGURATION -# ======================================== -# This file shows you what secrets you need to configure. -# -# SETUP INSTRUCTIONS: -# 1. Copy this file and rename it to "secrets.toml" (remove .example) -# 2. Fill in your actual OpenAI API key below -# 3. 
NEVER commit secrets.toml to Git (it's in .gitignore) -# -# For local development: Put secrets.toml in .streamlit/ folder -# For Streamlit Cloud: Add these in the app settings under "Secrets" - -# ======================================== -# OPENAI API KEY -# ======================================== -# Get your key from: https://platform.openai.com/api-keys -# OpenAI provides access to GPT models (GPT-3.5, GPT-4, etc.) -OPENAI_API_KEY = "your_openai_api_key_here" diff --git a/Asheesh_Ranjan_Srivastava/Day-3/session_logs/CHATBOT_SETUP_2025-10-30.md b/Asheesh_Ranjan_Srivastava/Day-3/session_logs/CHATBOT_SETUP_2025-10-30.md deleted file mode 100644 index d1a2130..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-3/session_logs/CHATBOT_SETUP_2025-10-30.md +++ /dev/null @@ -1,636 +0,0 @@ -# Multi-Persona Chatbot Setup - Session Checkpoint -**Date:** October 30, 2025 -**Project:** Multi-Persona Chatbot with Export Functionality -**Purpose:** OutSkill AI Engineering Bootcamp 2025 Submission + Portfolio -**Organization:** QUEST AND CROSSFIREโ„ข - ---- - -## ๐Ÿ“‹ Executive Summary - -Successfully created a deployment-ready Multi-Persona Chatbot application for dual-purpose use: -1. **OutSkill AI Engineering Bootcamp 2025** submission -2. **Professional portfolio** showcase - -This checkpoint documents the complete setup process, file structure, attribution strategy, and deployment roadmap. - ---- - -## ๐ŸŽฏ Project Context - -### **Background:** -- **Bootcamp Assignment:** Create a chatbot with persona switching and export functionality -- **Original Development:** Created using Gemini AI assistance during bootcamp -- **Enhancement Session:** Added professional branding, licensing, and documentation -- **Attribution Strategy:** Transparent about base architecture (OutSkill) and AI assistance - -### **Key Decisions:** -1. **Single Version Approach:** ONE codebase serves both bootcamp submission AND portfolio -2. 
**Transparent Attribution:** Clear credit to OutSkill (base architecture) + AI assistance (Gemini/Claude) + Author (implementation) -3. **Professional Branding:** QUEST AND CROSSFIREโ„ข integration (subtle, non-intrusive) -4. **Open Source License:** GPL-3.0 with trademark protection -5. **Deployment Ready:** Configured for immediate Streamlit Cloud deployment - ---- - -## ๐Ÿ“ Complete File Structure - -``` -D:\Claude\quest-crossfire-chatbot/ -โ”œโ”€โ”€ app.py # Main Streamlit application -โ”œโ”€โ”€ requirements.txt # Python dependencies -โ”œโ”€โ”€ LICENSE # GPL-3.0 with trademark notice -โ”œโ”€โ”€ README.md # Comprehensive documentation -โ”œโ”€โ”€ .gitignore # Git exclusions (secrets, cache) -โ”œโ”€โ”€ .streamlit/ -โ”‚ โ””โ”€โ”€ secrets.toml.example # API key configuration template -โ”œโ”€โ”€ chat_history/ # Auto-created on first run (gitignored) -โ”‚ โ””โ”€โ”€ chat_*.json # Individual chat session files -โ””โ”€โ”€ session_logs/ - โ””โ”€โ”€ CHATBOT_SETUP_2025-10-30.md # This checkpoint document -``` - ---- - -## ๐Ÿ“ Files Created - -### **1. app.py** -**Purpose:** Main chatbot application with persona switching and export functionality - -**Key Features:** -- 4 AI personas (General Assistant, Creative Poet, Technical Coder, Sarcastic Robot) -- Multi-session chat management -- Export to TXT/JSON/CSV -- Real-time streaming responses -- User feedback system (thumbs up/down) -- Persistent JSON storage - -**Attribution Added:** -```python -# ============================================================================== -# Multi-Persona Chatbot with Export Functionality -# ============================================================================== -# -# Copyright (c) 2025 QUEST AND CROSSFIREโ„ข -# Licensed under GPL-3.0 - see LICENSE file for details -# QUEST AND CROSSFIREโ„ข is a trademark. Trademark filings in process. 
-# -# Author: Asheesh Ranjan Srivastava -# Organization: QUEST AND CROSSFIREโ„ข -# Date: October 30, 2025 -# -# CREDITS & ATTRIBUTION: -# - Base Architecture: OutSkill AI Engineering Bootcamp 2025 -# - AI Assistance: Gemini (Google) & Claude (Anthropic) -# - Implementation & Customization: Asheesh Ranjan Srivastava -# - Persona System: Original implementation by author -# - Export Functionality: Original implementation by author -``` - -**Branding Integration:** -- Page title: "Multi-Persona Chatbot | QUEST AND CROSSFIREโ„ข" -- Footer caption: "Part of QUEST AND CROSSFIREโ„ข | OutSkill AI Engineering Bootcamp 2025" -- GPL-3.0 license notice in footer - -**File Location:** `D:\Claude\quest-crossfire-chatbot\app.py` - ---- - -### **2. LICENSE** -**Purpose:** Legal protection and open source licensing - -**License Type:** GPL-3.0 (GNU General Public License v3) - -**Key Sections:** -1. **Copyright Notice:** "Copyright (c) 2025 QUEST AND CROSSFIREโ„ข" -2. **Trademark Protection:** Notice that QUEST AND CROSSFIREโ„ข is a protected trademark (filings in process) -3. **Project Attribution:** Credits OutSkill (base architecture), Gemini/Claude (AI assistance), and Asheesh Ranjan Srivastava (implementation) -4. **Reference to Full License:** Links to https://www.gnu.org/licenses/gpl-3.0.en.html - -**Why GPL-3.0:** -- Allows free use, modification, distribution -- Requires derivatives to remain open source -- Protects author's work from proprietary forks -- Industry-standard for open source projects - -**File Location:** `D:\Claude\quest-crossfire-chatbot\LICENSE` - ---- - -### **3. 
requirements.txt** -**Purpose:** Python dependency specification for deployment - -**Dependencies:** -``` -streamlit>=1.28.0 -openai>=1.0.0 -``` - -**Notes:** -- Minimal dependencies (only 2 packages) -- OpenAI library used for OpenRouter API client -- Version constraints allow updates while maintaining compatibility - -**File Location:** `D:\Claude\quest-crossfire-chatbot\requirements.txt` - ---- - -### **4. .gitignore** -**Purpose:** Protect sensitive information and prevent committing unnecessary files - -**Key Exclusions:** -- `.streamlit/secrets.toml` - Protects OpenRouter API key -- `chat_history/` - Local chat data (ephemeral on cloud deployment) -- `__pycache__/`, `*.pyc` - Python cache files -- `venv/`, `.env` - Virtual environments and environment variables -- IDE settings (.vscode, .idea) -- OS files (.DS_Store, Thumbs.db) - -**Security Note:** Critical for preventing API key exposure in public repositories - -**File Location:** `D:\Claude\quest-crossfire-chatbot\.gitignore` - ---- - -### **5. .streamlit/secrets.toml.example** -**Purpose:** Template showing users how to configure OpenRouter API key - -**Content:** -```toml -# ======================================== -# OPENROUTER API KEY -# ======================================== -# Get your key from: https://openrouter.ai/keys -OPENROUTER_API_KEY = "your_openrouter_api_key_here" -``` - -**Usage Instructions:** -1. Copy to `.streamlit/secrets.toml` (remove .example) -2. Replace placeholder with actual API key -3. Never commit `secrets.toml` to Git (protected by .gitignore) - -**For Streamlit Cloud:** -- Add in app settings โ†’ Secrets section -- Paste same TOML format - -**File Location:** `D:\Claude\quest-crossfire-chatbot\.streamlit\secrets.toml.example` - ---- - -### **6. README.md** -**Purpose:** Comprehensive documentation for bootcamp submission AND portfolio showcase - -**Structure (27 sections):** -1. Project title and branding -2. Badges (License, Python, Streamlit) -3. 
Project context (OutSkill bootcamp) -4. Features overview (personas, sessions, export) -5. Tech stack table -6. Quick start guide -7. API key setup instructions -8. Usage guide (new chat, personas, export) -9. Project structure diagram -10. Educational value (skills demonstrated) -11. Known limitations (ephemeral storage) -12. Future enhancements roadmap -13. Credits & attribution (detailed) -14. License information -15. Trademark notice -16. Deployment instructions -17. Support & contact -18. Bootcamp submission checklist - -**Key Features:** -- Dual-purpose (bootcamp + portfolio) -- Professional formatting -- Clear installation steps -- Transparent attribution -- Deployment-ready instructions -- Addresses known limitations upfront - -**File Location:** `D:\Claude\quest-crossfire-chatbot\README.md` - ---- - -### **7. session_logs/CHATBOT_SETUP_2025-10-30.md** -**Purpose:** This checkpoint document - -**File Location:** `D:\Claude\quest-crossfire-chatbot\session_logs\CHATBOT_SETUP_2025-10-30.md` - ---- - -## ๐ŸŽ“ Attribution Structure - -### **Transparent Credit Model:** - -**OutSkill AI Engineering Bootcamp 2025:** -- Base architecture and assignment concept -- Core requirements (persona switching, chat history, export) -- Learning objectives and evaluation criteria - -**AI Assistance:** -- **Gemini (Google):** Initial code development during bootcamp -- **Claude (Anthropic):** Professional setup, branding, documentation - -**Asheesh Ranjan Srivastava:** -- Implementation and customization -- Persona system design -- Export functionality design -- Integration and testing -- QUEST AND CROSSFIREโ„ข branding -- Professional documentation - -**Why This Matters:** -- Industry-standard practice (developers use AI tools daily) -- Demonstrates learning and adaptation skills -- Shows professional code documentation -- Transparent about development process -- Suitable for bootcamp submission AND portfolio - ---- - -## โœ… Bootcamp Submission Checklist - -### **Required 
Tasks:** -- [x] Multi-persona chatbot functionality -- [x] Chat session management -- [x] Conversation export (TXT, JSON, CSV) -- [x] Persistent storage (JSON-based) -- [x] Clean, documented code -- [x] Professional project structure -- [x] README with setup instructions -- [x] requirements.txt for dependencies -- [x] Proper licensing (GPL-3.0) - -### **Professional Enhancements:** -- [x] Comprehensive attribution -- [x] QUEST AND CROSSFIREโ„ข branding -- [x] Trademark protection notice -- [x] Educational value documentation -- [x] Known limitations disclosure -- [x] Future enhancement roadmap -- [x] Deployment instructions - -### **Before Submission:** -- [ ] Test all features locally -- [ ] Export sample conversation (include in submission) -- [ ] Screenshot of working app -- [ ] Document any challenges faced (optional) -- [ ] Optional: Deploy to Streamlit Cloud and include live URL - ---- - -## ๐Ÿš€ Deployment Roadmap - -### **Phase 1: GitHub Repository (Ready Now)** - -**Steps:** -1. Initialize Git repository: - ```bash - cd D:\Claude\quest-crossfire-chatbot - git init - git add . - git commit -m "Initial commit: Multi-Persona Chatbot for OutSkill Bootcamp 2025" - ``` - -2. Create GitHub repository: - - Go to https://github.com/new - - Repository name: `quest-crossfire-chatbot` - - Description: "Multi-Persona Chatbot | OutSkill AI Engineering Bootcamp 2025 | QUEST AND CROSSFIREโ„ข" - - Public repository - - Do NOT initialize with README (already exists) - -3. Push to GitHub: - ```bash - git remote add origin https://github.com/YOUR_USERNAME/quest-crossfire-chatbot.git - git branch -M main - git push -u origin main - ``` - -4. Update README.md: - - Replace `YOUR_USERNAME` with actual GitHub username (line 86) - ---- - -### **Phase 2: Streamlit Cloud Deployment (When Ready)** - -**Prerequisites:** -- GitHub repository created and pushed -- OpenRouter API key ready -- Streamlit account created - -**Deployment Steps:** - -1. 
**Go to Streamlit Cloud:** - - Visit https://share.streamlit.io/ - - Sign in with GitHub - -2. **Create New App:** - - Click "New app" - - Repository: `YOUR_USERNAME/quest-crossfire-chatbot` - - Branch: `main` - - Main file path: `app.py` - -3. **Configure Secrets:** - - Click "Advanced settings" - - Go to "Secrets" section - - Paste: - ```toml - OPENROUTER_API_KEY = "sk-or-v1-your_actual_key_here" - ``` - -4. **Deploy:** - - Click "Deploy!" - - Wait 2-3 minutes for deployment - - Default URL: `https://YOUR_USERNAME-quest-crossfire-chatbot.streamlit.app` - -5. **Custom Subdomain (Optional):** - - Go to Settings โ†’ General โ†’ App URL - - Choose custom subdomain (e.g., `quest-chatbot`) - - Final URL: `https://quest-chatbot.streamlit.app` - -**Note on Custom Domains:** -- Streamlit FREE tier does NOT support custom domains -- Requires Streamlit Team plan ($20/month per editor) -- Streamlit URL is acceptable for bootcamp and portfolio - ---- - -### **Phase 3: Local Testing (Before Deployment)** - -**Setup:** -1. Get OpenRouter API key from https://openrouter.ai/keys -2. Create `.streamlit/secrets.toml`: - ```bash - cd D:\Claude\quest-crossfire-chatbot - mkdir .streamlit - cp .streamlit/secrets.toml.example .streamlit/secrets.toml - ``` -3. Edit `.streamlit/secrets.toml` with actual API key - -**Run Locally:** -```bash -streamlit run app.py -``` - -**Test Checklist:** -- [ ] App loads without errors -- [ ] Can create new chat -- [ ] Can switch personas -- [ ] Messages send and receive -- [ ] AI responses stream correctly -- [ ] Export to TXT works -- [ ] Export to JSON works -- [ ] Export to CSV works -- [ ] Chat sessions save and load -- [ ] Delete chat works -- [ ] Feedback buttons work (decorative) - ---- - -## โš ๏ธ Known Limitations - -### **1. 
Ephemeral Storage on Streamlit Cloud** - -**Issue:** -- Chat history stored in local JSON files (`chat_history/` folder) -- Streamlit Cloud uses ephemeral filesystem -- All chat history lost when app restarts/sleeps -- Typically happens after 7 days of inactivity - -**Impact:** -- Suitable for bootcamp demonstration -- NOT suitable for production use -- Users should export important conversations - -**Workarounds:** -1. Use export functionality before app sleeps -2. Treat as session-only conversations -3. For production: Implement database (PostgreSQL, SQLite, MongoDB) - -**For Bootcamp:** -- This is ACCEPTABLE for demonstration -- Shows file I/O skills -- Documents known limitation professionally -- Real production apps use databases - ---- - -### **2. Feedback System** - -**Current Status:** -- Thumbs up/down buttons are decorative -- Feedback not persisted to storage -- No analytics dashboard - -**Future Enhancement:** -- Save feedback to database -- Analytics for response quality -- Use feedback to improve personas - ---- - -### **3. 
API Costs** - -**OpenRouter Pricing:** -- Pay-per-use model -- Mistral 7B Instruct: ~$0.0002 per request -- Free tier includes $1 credit -- Monitor usage at https://openrouter.ai/activity - -**Recommendation:** -- Set spending limit in OpenRouter dashboard -- Monitor token usage -- For production: Implement usage tracking - ---- - -## ๐Ÿ”— Related Projects - -### **Obsidian AI Learning Assistant** -- **Description:** AI-powered learning assistant for Obsidian note-taking -- **Deployment:** https://aethelgard-obsidian.streamlit.app -- **Organization:** Aethelgard Academyโ„ข (under QUEST AND CROSSFIREโ„ข) -- **Tech Stack:** Streamlit, OpenAI API, Hugging Face API -- **Status:** Live and operational - -**Connection to This Project:** -- Both part of QUEST AND CROSSFIREโ„ข ecosystem -- Both deployed on Streamlit Cloud -- Both demonstrate AI integration skills -- Portfolio showcases multiple AI projects - ---- - -## ๐Ÿ“Š Skills Demonstrated - -### **Technical Skills:** -1. โœ… Streamlit web application development -2. โœ… API integration (OpenRouter/OpenAI) -3. โœ… Session state management -4. โœ… File I/O operations (JSON, CSV, TXT) -5. โœ… Real-time streaming responses -6. โœ… Multi-persona system architecture -7. โœ… Error handling and user feedback -8. โœ… Git version control -9. โœ… Professional code documentation - -### **Professional Practices:** -1. โœ… Comprehensive README creation -2. โœ… Proper project structure -3. โœ… Open source licensing (GPL-3.0) -4. โœ… Transparent AI attribution -5. โœ… Known limitations disclosure -6. โœ… Deployment-ready code -7. โœ… User-focused design -8. 
โœ… Professional branding integration - ---- - -## ๐ŸŽฏ Success Criteria - -### **For Bootcamp Submission:** -- [x] Meets all assignment requirements -- [x] Professional code quality -- [x] Comprehensive documentation -- [x] Transparent attribution -- [x] Deployment-ready -- [x] Demonstrates learning - -### **For Portfolio:** -- [x] Shows technical proficiency -- [x] Professional presentation -- [x] Real-world deployment capability -- [x] Open source contribution -- [x] Brand integration (QUEST AND CROSSFIREโ„ข) - ---- - -## ๐Ÿ“… Timeline - -**October 30, 2025:** -- โœ… Original chatbot code developed (Gemini assistance) -- โœ… Professional setup and branding added (Claude assistance) -- โœ… GPL-3.0 license created -- โœ… Comprehensive README written -- โœ… Deployment configuration completed -- โœ… Session checkpoint created - -**Next Steps (User Timeline):** -1. Test locally -2. Push to GitHub -3. Submit to bootcamp -4. Deploy to Streamlit Cloud (optional) -5. Add to portfolio - ---- - -## ๐Ÿ”ฎ Future Enhancements - -### **Priority 1: Persistent Storage** -- Replace JSON files with PostgreSQL database -- Implement user authentication -- Deploy to Heroku/Railway with database add-on - -### **Priority 2: Enhanced Features** -- Conversation search functionality -- Tagging system for chats -- Share conversations via URL -- Analytics dashboard - -### **Priority 3: Persona System** -- User-defined custom personas -- Persona marketplace -- Import/export persona templates -- Persona performance analytics - -### **Priority 4: Advanced Export** -- PDF export with formatting -- Markdown export for Obsidian -- Email conversation summaries -- Cloud backup integration - ---- - -## ๐Ÿ“ Notes for Future Self - -### **Important Reminders:** -1. **Trademark Format:** Always use "QUEST AND CROSSFIREโ„ข" (ALL CAPS, no "&") -2. **Attribution:** Keep OutSkill and AI assistance credits in all versions -3. **License:** GPL-3.0 must remain on derivatives -4. 
**API Keys:** Never commit `.streamlit/secrets.toml` to Git -5. **Storage:** Streamlit Cloud has ephemeral filesystem - -### **If Deploying to Production:** -1. Implement database for persistent storage -2. Add user authentication -3. Set up monitoring and analytics -4. Implement rate limiting -5. Add error logging -6. Create backup system -7. Consider custom domain (requires Team plan) - -### **If Forking for Another Project:** -1. Update branding (remove QUEST AND CROSSFIREโ„ข if not affiliated) -2. Keep GPL-3.0 license -3. Update README with new project details -4. Maintain attribution to original author - ---- - -## ๐Ÿ† Achievements - -**This Project Successfully Demonstrates:** - -1. **Learning Agility:** Adapted bootcamp assignment into professional portfolio piece -2. **Technical Proficiency:** Full-stack Streamlit application with API integration -3. **Professional Practices:** Licensing, documentation, attribution, version control -4. **AI Tool Usage:** Effective use of AI assistance (industry standard) -5. **Brand Building:** Integration of QUEST AND CROSSFIREโ„ข ecosystem -6. **Open Source Contribution:** GPL-3.0 licensed, ready for community use - ---- - -## ๐Ÿ“ž Support & Resources - -### **Documentation:** -- [Streamlit Documentation](https://docs.streamlit.io/) -- [OpenRouter API Docs](https://openrouter.ai/docs) -- [GPL-3.0 License](https://www.gnu.org/licenses/gpl-3.0.en.html) - -### **Project Links:** -- **Repository:** (To be created on GitHub) -- **Live Demo:** (To be deployed on Streamlit Cloud) -- **Organization:** [QUEST AND CROSSFIREโ„ข](https://questandcrossfire.com) - -### **Related Projects:** -- **Obsidian AI Assistant:** https://aethelgard-obsidian.streamlit.app - ---- - -## โœ… Checkpoint Status - -**All Tasks Completed:** -1. โœ… Created project folder structure -2. โœ… Enhanced app.py with attribution and branding -3. โœ… Created GPL-3.0 LICENSE -4. โœ… Created requirements.txt -5. โœ… Created .gitignore -6. 
โœ… Created .streamlit/secrets.toml.example -7. โœ… Created comprehensive README.md -8. โœ… Created this session checkpoint - -**Project Status:** โœ… **READY FOR BOOTCAMP SUBMISSION** - -**Next Action:** User testing, GitHub push, and bootcamp submission - ---- - -**Checkpoint Created:** October 30, 2025 -**Document Version:** 1.0 -**Status:** Complete - ---- - -**ยฉ 2025 QUEST AND CROSSFIREโ„ข | Licensed under GPL-3.0** -*OutSkill AI Engineering Bootcamp 2025* diff --git a/Asheesh_Ranjan_Srivastava/Day-4/.gitignore b/Asheesh_Ranjan_Srivastava/Day-4/.gitignore deleted file mode 100644 index e43b69a..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-4/.gitignore +++ /dev/null @@ -1,103 +0,0 @@ -# Credentials and Secrets -*.env -*.key -*.pem -credentials.json -secrets.json - -# n8n specific -.n8n/ -n8n-local.settings.json -*credentials*.json - -# API Keys -*apikey* -*api_key* -*API_KEY* - -# OAuth tokens -*token*.json -*oauth*.json - -# Configuration files with sensitive data -config.json -settings.json - -# Personal data -profile_data.json -personal_info.json - -# Google Sheets local cache -*.gsheet - -# Backup files -*.backup -*.bak -*~ - -# Log files -*.log -logs/ -*.logs - -# Temporary files -*.tmp -*.temp -.DS_Store -Thumbs.db - -# IDE and editor files -.vscode/ -.idea/ -*.swp -*.swo -*~ - -# Python virtual environments (if using Python scripts) -venv/ -env/ -.venv/ -.env/ - -# Node modules (if using JavaScript/Node.js) -node_modules/ -package-lock.json -yarn.lock - -# Database files -*.db -*.sqlite -*.sqlite3 - -# Compiled files -*.pyc -__pycache__/ -*.class -*.o - -# Operating system files -.DS_Store -.DS_Store? 
-._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db - -# Archive files (may contain sensitive data) -*.zip -*.tar.gz -*.rar -*.7z - -# Documentation with API keys (if accidentally created) -*_PRIVATE.md -*_SECRET.md - -# n8n workflow exports WITH credentials -*_with_creds.json -*_credentials.json - -# Test data with real information -test_data_real.json -production_data.json diff --git a/Asheesh_Ranjan_Srivastava/Day-4/CREDENTIALS.md b/Asheesh_Ranjan_Srivastava/Day-4/CREDENTIALS.md deleted file mode 100644 index 9575ffa..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-4/CREDENTIALS.md +++ /dev/null @@ -1,464 +0,0 @@ -# Credentials Setup Guide - -Complete guide for configuring all API credentials and OAuth connections needed for the LinkedIn job automation workflow. - ---- - -## Overview - -This workflow requires 4 main credential connections: -1. **Gmail OAuth2** - Read emails and mark as read -2. **OpenAI API** - AI text generation (extraction, rating, cover letters, video scripts) -3. **HeyGen API** - AI video generation (optional) -4. **Google Sheets OAuth2** - Save application data - ---- - -## 1. Gmail OAuth2 Credentials - -### Why Needed: -- Read LinkedIn job alert emails -- Extract full email content via Gmail API -- Mark emails as read after processing - -### Setup Instructions: - -#### A. Enable Gmail API in Google Cloud: - -1. Go to [Google Cloud Console](https://console.cloud.google.com/) -2. Create a new project: - - Click "Select a project" โ†’ "New Project" - - Name: "LinkedIn Job Automation" (or your choice) - - Click "Create" - -3. Enable Gmail API: - - Navigate to "APIs & Services" โ†’ "Library" - - Search for "Gmail API" - - Click on it โ†’ Click "Enable" - -4. 
Create OAuth 2.0 Credentials: - - Go to "APIs & Services" โ†’ "Credentials" - - Click "Create Credentials" โ†’ "OAuth client ID" - - Configure consent screen if prompted: - - User Type: External - - App name: "LinkedIn Job Automation" - - User support email: Your email - - Developer contact: Your email - - Save and Continue through all steps - -5. Create OAuth Client: - - Application type: **Web application** - - Name: "n8n Gmail Integration" - - Authorized redirect URIs: Add these URLs: - ``` - https://app.n8n.cloud/rest/oauth2-credential/callback - http://localhost:5678/rest/oauth2-credential/callback - ``` - (Use first one for n8n Cloud, second for self-hosted) - -6. **Save These Values:** - - Client ID: `123456789.apps.googleusercontent.com` - - Client Secret: `GOCSPX-abc123...` - -#### B. Add to n8n: - -1. In n8n, go to **Credentials** โ†’ **Add Credential** -2. Search and select **"Gmail OAuth2 API"** -3. Enter: - - **Client ID:** [Your Client ID from step A6] - - **Client Secret:** [Your Client Secret from step A6] -4. Click **"Sign in with Google"** -5. Select your Gmail account -6. Grant permissions: - - โœ… Read emails - - โœ… Modify emails - - โœ… Manage labels -7. Click **"Save"** - -#### C. Test Connection: - -In n8n workflow: -1. Click "Get LinkedIn Email IDs" node -2. Select your Gmail credential from dropdown -3. Click "Execute Node" -4. Should show recent emails - ---- - -## 2. OpenAI API Credentials - -### Why Needed: -- **GPT-5:** Extract job details from HTML (high accuracy) -- **GPT-4o-latest:** Rate job fit + Generate cover letters (quality critical) -- **GPT-4o-mini:** Create video scripts (cost-optimized) - -### Setup Instructions: - -#### A. Get OpenAI API Key: - -1. Go to [OpenAI Platform](https://platform.openai.com/) -2. Sign up or log in -3. Navigate to [API Keys](https://platform.openai.com/api-keys) -4. Click **"Create new secret key"** -5. Name: "n8n LinkedIn Automation" -6. 
**IMPORTANT:** Copy the key immediately (you won't see it again!) - ``` - sk-proj-abc123xyz789... - ``` - -#### B. Add Credits: - -1. Go to [Billing](https://platform.openai.com/account/billing) -2. Add payment method -3. Set usage limits (recommended: $20-50/month for this workflow) -4. Monitor usage regularly - -#### C. Add to n8n: - -1. In n8n, go to **Credentials** โ†’ **Add Credential** -2. Select **"OpenAI API"** -3. Enter: - - **API Key:** [Your sk-proj-... key from step A6] -4. Click **"Save"** - -#### D. Test Connection: - -In n8n workflow: -1. Click "Extract Job Data" node -2. Select your OpenAI credential -3. Click "Execute Node" -4. Should show successful completion - -### Cost Monitoring: - -Check usage at: https://platform.openai.com/usage - -**Expected Costs (50 jobs/day):** -- GPT-5 extraction: ~$5-7/day -- GPT-4o-latest (rating + letters): ~$5-7/day -- GPT-4o-mini (video scripts): ~$0.50/day -- **Total:** ~$10-15/day - -**Cost Optimization:** -- Process fewer jobs (adjust Gmail filter) -- Use GPT-4o-mini for more tasks (lower quality but cheaper) -- Implement HTML parsing (saves 99% extraction cost) - ---- - -## 3. HeyGen API Credentials (Optional) - -### Why Needed: -- Generate AI avatar videos for high-match jobs (rating โ‰ฅ4) -- Creates 45-second personalized application videos - -### Setup Instructions: - -#### A. Get HeyGen API Key: - -1. Go to [HeyGen](https://heygen.com/) -2. Sign up for account -3. Navigate to **Account** โ†’ **API** -4. Click **"Generate API Key"** -5. Copy the key: - ``` - HG-abc123xyz789... - ``` - -#### B. Add Credits/Plan: - -HeyGen uses credits or subscription: -- Check pricing: https://heygen.com/pricing -- Videos cost ~$0.30-0.50 per minute -- 45-second videos = ~$0.25-0.35 each -- Expected: 20 videos/day = ~$5-7/day - -#### C. Add to n8n: - -HeyGen uses HTTP Header authentication: - -1. In n8n, go to **Credentials** โ†’ **Add Credential** -2. Select **"HTTP Header Auth"** or **"Header Auth"** -3. 
Enter: - - **Name:** `heygenApiKey` (exactly this name!) - - **Header Name:** `X-Api-Key` - - **Value:** [Your HeyGen API key] -4. Click **"Save"** - -#### D. Configure in Workflow: - -1. Click "Generate Video (HeyGen)" node -2. Under Authentication: - - Type: Generic Credential Type - - Credential Type: httpHeaderAuth - - Select: Your "heygenApiKey" credential -3. Repeat for "Check Video Status" node - -#### E. Test Connection: - -1. Click "Generate Video (HeyGen)" node -2. Execute with test data -3. Should return video_id in response - -### Optional: Skip Video Generation - -Don't want to use HeyGen? You can disable video generation: - -1. Delete "Generate Video (HeyGen)" node -2. Delete "Wait 30 Seconds" node -3. Delete "Check Video Status" node -4. Delete "IF Video Complete" node -5. Connect "Create Video Script" directly to "Save to Sheet" -6. Workflow will save video scripts but not create videos - ---- - -## 4. Google Sheets OAuth2 Credentials - -### Why Needed: -- Save all job application data -- Track applications over time -- Access cover letters and video scripts - -### Setup Instructions: - -#### A. Create Google Sheet: - -1. Go to [Google Sheets](https://sheets.google.com/) -2. Create new spreadsheet -3. Name: "LinkedIn Job Applications Tracker" -4. Add columns (exact names): - ``` - Title | Job Description | Link | Date | Rating | Company Name | - Benefits | Location | Match Explanation | Cover Letter | - Video Script | Video URL - ``` -5. Copy Sheet ID from URL: - ``` - https://docs.google.com/spreadsheets/d/YOUR_SHEET_ID_HERE/edit - Example: 1xMELeUYUcGrqctnCQSWOuB586k1NHth0mgLhUVRu6EI - ``` - -#### B. Enable Google Sheets API: - -1. Go to [Google Cloud Console](https://console.cloud.google.com/) -2. Use same project as Gmail (or create new) -3. Enable Google Sheets API: - - APIs & Services โ†’ Library - - Search "Google Sheets API" - - Click Enable - -4. 
OAuth credentials: - - If you already have OAuth from Gmail setup, you can reuse it - - Otherwise, follow same steps as Gmail OAuth (section 1A) - -#### C. Add to n8n: - -1. In n8n, go to **Credentials** โ†’ **Add Credential** -2. Select **"Google Sheets OAuth2 API"** -3. Enter: - - **Client ID:** [Same as Gmail, from Google Cloud] - - **Client Secret:** [Same as Gmail] -4. Click **"Sign in with Google"** -5. Authorize access to Google Sheets -6. Click **"Save"** - -#### D. Update Workflow: - -1. Click "Save to Sheet (No Video)" node -2. Under "Document": - - Click dropdown - - Select "From list" - - Choose your tracking sheet - OR - - Select "By ID" - - Paste your Sheet ID: `1xMELeUYUcGrqctnCQSWOuB586k1NHth0mgLhUVRu6EI` - -3. Under "Sheet": - - Select the sheet name (default: "Sheet1") - -4. Repeat for "Save to Google Sheet" node - -#### E. Test Connection: - -1. Execute workflow with test data -2. Check Google Sheet -3. Should see new row with job data - ---- - -## Security Best Practices - -### Protect Your Credentials: - -โŒ **NEVER DO THIS:** -- Commit API keys to git -- Share workflow JSON with credentials embedded -- Post API keys in public forums -- Use same API key across multiple projects -- Store keys in plain text files - -โœ… **ALWAYS DO THIS:** -- Use n8n's credential encryption -- Export workflows without credentials -- Rotate API keys every 3-6 months -- Monitor API usage for suspicious activity -- Set spending limits on OpenAI and HeyGen -- Use different credentials for dev/prod -- Review OAuth permissions regularly - -### Data Privacy: - -**What's Stored:** -- Your profile data (in workflow prompts) -- Job application details (in Google Sheet) -- Cover letters with personal info -- Video scripts with your name - -**Privacy Considerations:** -- Google Sheet is private to your account -- n8n credentials are encrypted -- Email content is processed in real-time (not stored) -- API providers (OpenAI, HeyGen) process your data per their privacy 
policies - -**Recommendations:** -- Review OpenAI privacy policy: https://openai.com/policies/privacy-policy -- Review HeyGen privacy policy: https://heygen.com/privacy -- Don't include sensitive personal data in profile (SSN, passport, etc.) -- Use separate Google Sheet for this workflow (not mixed with sensitive data) - ---- - -## Credential Rotation Schedule - -### Recommended Schedule: - -| Credential | Rotation Frequency | Why | -|------------|-------------------|-----| -| OpenAI API Key | Every 6 months | Security best practice | -| HeyGen API Key | Every 6 months | Security best practice | -| Gmail OAuth | Yearly or if suspicious activity | OAuth tokens auto-refresh | -| Google Sheets OAuth | Yearly or if suspicious activity | OAuth tokens auto-refresh | - -### How to Rotate: - -1. **OpenAI/HeyGen:** - - Generate new API key - - Update in n8n credentials - - Delete old key from provider dashboard - - Test workflow works - -2. **Gmail/Sheets OAuth:** - - Revoke access in Google Account settings - - Re-authorize in n8n - - Test workflow works - ---- - -## Troubleshooting - -### Gmail OAuth Issues: - -**Error: "Access denied" or "Invalid credentials"** -- Solution: Re-authorize Gmail connection in n8n -- Check: OAuth redirect URI matches exactly -- Verify: Gmail API is enabled in Google Cloud - -**Error: "Token expired"** -- Solution: OAuth tokens auto-refresh, but may need re-auth -- Check: n8n has internet access to refresh tokens - -### OpenAI API Issues: - -**Error: "Incorrect API key"** -- Solution: Regenerate key and update in n8n -- Check: Key starts with `sk-proj-` (new format) or `sk-` (old format) - -**Error: "Rate limit exceeded"** -- Solution: Wait 1 minute and retry -- Check: Your usage at https://platform.openai.com/usage -- Consider: Upgrading to higher tier if hitting limits regularly - -**Error: "Insufficient credits"** -- Solution: Add credits in OpenAI billing -- Check: Usage limits and spending limits - -### HeyGen API Issues: - -**Error: 
"Invalid API key"** -- Solution: Verify key is correct and credential name is exactly `heygenApiKey` -- Check: Header Auth credential uses `X-Api-Key` header name - -**Error: "Insufficient credits"** -- Solution: Add credits or upgrade plan -- Check: HeyGen account balance - -### Google Sheets Issues: - -**Error: "Sheet not found"** -- Solution: Verify Sheet ID is correct -- Check: Sheet is not deleted -- Ensure: OAuth has access to this sheet - -**Error: "Column not found"** -- Solution: Ensure column names match exactly (case-sensitive) -- Check: No extra spaces in column names -- Verify: Sheet has all required columns - ---- - -## Credential Summary - -### Quick Reference: - -``` -Gmail OAuth2: -โ”œโ”€โ”€ Purpose: Read emails, mark as read -โ”œโ”€โ”€ Provider: Google Cloud Console -โ”œโ”€โ”€ Type: OAuth 2.0 -โ”œโ”€โ”€ Scopes: Gmail read/modify -โ””โ”€โ”€ Cost: Free - -OpenAI API: -โ”œโ”€โ”€ Purpose: AI text generation -โ”œโ”€โ”€ Provider: OpenAI Platform -โ”œโ”€โ”€ Type: API Key -โ”œโ”€โ”€ Models: GPT-5, GPT-4o-latest, GPT-4o-mini -โ””โ”€โ”€ Cost: ~$10-15/day (50 jobs) - -HeyGen API: -โ”œโ”€โ”€ Purpose: AI video generation -โ”œโ”€โ”€ Provider: HeyGen -โ”œโ”€โ”€ Type: API Key (Header Auth) -โ”œโ”€โ”€ Feature: Avatar videos -โ””โ”€โ”€ Cost: ~$5-7/day (20 videos) - -Google Sheets OAuth2: -โ”œโ”€โ”€ Purpose: Save application data -โ”œโ”€โ”€ Provider: Google Cloud Console -โ”œโ”€โ”€ Type: OAuth 2.0 -โ”œโ”€โ”€ Access: Sheet read/write -โ””โ”€โ”€ Cost: Free -``` - ---- - -## Need Help? 
- -### Resources: -- **n8n Credentials Docs:** https://docs.n8n.io/credentials/ -- **Gmail API Docs:** https://developers.google.com/gmail/api -- **OpenAI API Docs:** https://platform.openai.com/docs -- **HeyGen API Docs:** https://docs.heygen.com/ -- **Google Sheets API:** https://developers.google.com/sheets/api - -### Support: -- **n8n Community:** https://community.n8n.io/ -- **OpenAI Support:** https://help.openai.com/ -- **HeyGen Support:** support@heygen.com - ---- - -**Last Updated:** October 31, 2025 -**Author:** Asheesh Ranjan Srivastava -**Project:** OutSkill AI Engineering Bootcamp 2025 - Day 4 diff --git a/Asheesh_Ranjan_Srivastava/Day-4/FILE_SUMMARY.md b/Asheesh_Ranjan_Srivastava/Day-4/FILE_SUMMARY.md deleted file mode 100644 index 443facc..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-4/FILE_SUMMARY.md +++ /dev/null @@ -1,392 +0,0 @@ -# File Summary - -Quick overview of all files in the Day-4 LinkedIn Job Automation project. - ---- - -## Core Files - -### `workflow.json` -**Purpose:** Complete n8n workflow definition (16 nodes, 860 lines) - -**What it contains:** -- Workflow name and metadata -- 16 node definitions with configurations -- Node connections (workflow flow) -- Credential references (empty - must be configured) -- Trigger schedule (daily 3 AM) -- AI model configurations (GPT-5, GPT-4o-latest, GPT-4o-mini) -- HeyGen video generation settings -- Google Sheets column mappings - -**How to use:** -1. Import into n8n (Workflows โ†’ Import from File) -2. Configure credentials for each node -3. Update Google Sheet ID -4. Customize profile data in prompts -5. 
Test and activate - -**Important:** -- Does NOT contain API keys (for security) -- Contains MY profile data (customize before use) -- Google Sheet ID must be updated -- Credential references must be configured - ---- - -## Documentation Files - -### `README.md` -**Purpose:** Complete project documentation and learning journey - -**Sections:** -- What the workflow does -- First principles thinking (email vs RSS decision) -- 6 major problem-solving iterations -- What I learned (technical + AI collaboration + systems thinking) -- Architecture decisions -- Human-AI collaboration model -- Key metrics and performance -- Portfolio-worthy factors -- Future optimizations -- License information -- Technical appendix - -**Target Audience:** Anyone reviewing the project, employers, bootcamp evaluators - -**Length:** ~420 lines, comprehensive but readable - ---- - -### `SETUP.md` -**Purpose:** Step-by-step setup instructions for running the workflow - -**Sections:** -- Prerequisites (accounts needed) -- n8n setup (cloud or self-hosted) -- Gmail API configuration -- OpenAI API configuration -- HeyGen API configuration (optional) -- Google Sheets setup -- Workflow import instructions -- Profile data customization -- Testing procedures -- Activation and scheduling -- Cost estimation -- Troubleshooting guide - -**Target Audience:** Users setting up the workflow for the first time - -**Length:** ~350 lines, detailed technical guide - ---- - -### `CREDENTIALS.md` -**Purpose:** Detailed guide for all credential configurations - -**Sections:** -- Overview of all credentials needed -- Gmail OAuth2 setup (step-by-step) -- OpenAI API key setup -- HeyGen API setup -- Google Sheets OAuth2 setup -- Security best practices -- Data privacy considerations -- Credential rotation schedule -- Troubleshooting for each credential type - -**Target Audience:** Users configuring API credentials - -**Length:** ~400 lines, comprehensive security guide - ---- - -### `TESTING.md` -**Purpose:** 
Comprehensive testing and verification guide for workflow validation - -**Sections:** -- 15 detailed test cases (Email โ†’ URLs โ†’ Scraping โ†’ AI โ†’ Sheets) -- IF node routing fix documentation (October 31, 2025) -- Success criteria for each test -- Troubleshooting for each component -- Cost monitoring guide -- End-to-end workflow test -- Production readiness checklist -- Test results template - -**Target Audience:** Users testing and validating the workflow after setup - -**Length:** ~550 lines, step-by-step testing guide - -**Key Tests:** -- Test 4: IF node routing (recently fixed) -- Test 15: End-to-end comprehensive test -- Cost monitoring throughout - ---- - -### `LICENSE` -**Purpose:** AGPL-3.0 License for the project - -**Contains:** -- GNU Affero General Public License v3.0 text -- Copyright: Asheesh Ranjan Srivastava (2025) -- Additional notices: - - Trademark protection (Aethelgard Academy, Quest And Crossfire - Filed - awaiting certification) - - Bootcamp context attribution - - Human-AI collaboration attribution - - Third-party service notices - - Network use clause (AGPL-3.0 requirement) - - Important disclaimers - -**Why AGPL-3.0?** -- Open source with network use clause (source code must be provided for web services) -- Protects trademarks while allowing code sharing -- Ensures derivatives remain open source -- Consistent with all Quest And Crossfireโ„ข projects - ---- - -### `FILE_SUMMARY.md` -**Purpose:** This file - quick overview of project structure - -**Target Audience:** Anyone trying to understand project organization - ---- - -## Configuration Files - -### `.gitignore` -**Purpose:** Prevent committing sensitive data to git - -**Protects:** -- Credentials and API keys -- OAuth tokens -- Configuration files with secrets -- Personal data -- Log files -- Temporary files -- IDE settings - -**Critical for:** -- Security (prevents API key leaks) -- Privacy (protects personal data) -- Clean repository (excludes temporary files) - ---- - -## File 
Structure Overview - -``` -Day-4/ -โ”œโ”€โ”€ workflow.json # n8n workflow (CORE FILE) -โ”œโ”€โ”€ README.md # Complete documentation (START HERE) -โ”œโ”€โ”€ SETUP.md # Setup instructions -โ”œโ”€โ”€ CREDENTIALS.md # Credential configuration guide -โ”œโ”€โ”€ TESTING.md # Testing & verification guide -โ”œโ”€โ”€ LICENSE # AGPL-3.0 License -โ”œโ”€โ”€ FILE_SUMMARY.md # This file -โ””โ”€โ”€ .gitignore # Git security (prevents credential leaks) -``` - ---- - -## Quick Start Guide - -**If you're new to this project:** - -1. **Read first:** `README.md` - Understand what this does and the learning journey -2. **Setup:** Follow `SETUP.md` step-by-step to configure everything -3. **Credentials:** Use `CREDENTIALS.md` for detailed credential setup -4. **Import:** Load `workflow.json` into n8n -5. **Customize:** Update profile data in workflow prompts -6. **Test:** Follow `TESTING.md` to verify all 15 components work -7. **Activate:** Enable daily automation - -**If you're evaluating this project:** - -1. **Start with:** `README.md` - See the learning journey and technical decisions -2. **Check:** `LICENSE` - Understand usage rights -3. **Review:** `workflow.json` - Inspect the actual automation logic - -**If you're troubleshooting:** - -1. **Setup issues:** Check `SETUP.md` troubleshooting section -2. **Credential issues:** Check `CREDENTIALS.md` troubleshooting section -3. **Testing issues:** Check `TESTING.md` for test-specific troubleshooting -4. 
**Workflow errors:** Check node-specific configurations in n8n UI - ---- - -## File Sizes - -| File | Size | Type | -|------|------|------| -| workflow.json | ~30 KB | JSON (n8n workflow) | -| README.md | ~15 KB | Markdown (documentation) | -| SETUP.md | ~18 KB | Markdown (setup guide) | -| CREDENTIALS.md | ~20 KB | Markdown (credential guide) | -| TESTING.md | ~25 KB | Markdown (testing guide) | -| LICENSE | ~2 KB | Text (legal) | -| FILE_SUMMARY.md | ~5 KB | Markdown (this file) | -| .gitignore | ~1 KB | Text (git config) | - -**Total:** ~116 KB (documentation-heavy, as it should be!) - ---- - -## Missing Files (Intentionally) - -### Why no `requirements.txt`? -- This is an n8n workflow (not Python code) -- Dependencies are managed by n8n -- No Python packages to install - -### Why no `app.py` or source code? -- Workflow logic is in `workflow.json` -- n8n uses visual workflow editor -- Custom code is embedded in Code nodes within JSON - -### Why no `config.json` or `settings.json`? -- Configuration is in n8n UI -- Credentials are stored in n8n's encrypted credential system -- Workflow JSON contains node configurations - -### Why no test files? 
-- Testing is done through n8n's built-in execution testing -- Real data testing (with actual emails) is documented in SETUP.md -- No separate test suite needed for workflow automation - ---- - -## File Relationships - -``` -README.md โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€> Explains WHAT and WHY - โ”‚ - โ”œโ”€โ”€> Links to: SETUP.md (HOW to set up) - โ”œโ”€โ”€> Links to: CREDENTIALS.md (detailed credential help) - โ”œโ”€โ”€> Links to: TESTING.md (verification guide) - โ”œโ”€โ”€> Links to: LICENSE (usage rights) - โ””โ”€โ”€> References: workflow.json (the actual automation) - -SETUP.md โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€> Step-by-step HOW TO - โ”‚ - โ”œโ”€โ”€> References: workflow.json (file to import) - โ”œโ”€โ”€> Links to: CREDENTIALS.md (credential details) - โ”œโ”€โ”€> Links to: TESTING.md (next step after setup) - โ””โ”€โ”€> Prerequisites listed - -CREDENTIALS.md โ”€โ”€โ”€> Detailed credential setup - โ”‚ - โ”œโ”€โ”€> Referenced by: SETUP.md - โ”œโ”€โ”€> Security best practices - โ””โ”€โ”€> Troubleshooting for each credential - -TESTING.md โ”€โ”€โ”€โ”€โ”€โ”€โ”€> Testing & verification guide - โ”‚ - โ”œโ”€โ”€> Referenced by: SETUP.md (test after setup) - โ”œโ”€โ”€> 15 test cases for all components - โ”œโ”€โ”€> Documents IF node fix (Oct 31, 2025) - โ””โ”€โ”€> Production readiness checklist - -workflow.json โ”€โ”€โ”€โ”€> The actual n8n automation - โ”‚ - โ”œโ”€โ”€> Imported into n8n - โ”œโ”€โ”€> Contains all node logic - โ”œโ”€โ”€> Requires credential configuration - โ””โ”€โ”€> Updated with IF node fix - -.gitignore โ”€โ”€โ”€โ”€โ”€โ”€โ”€> Protects all files - โ”‚ - โ””โ”€โ”€> Prevents committing credentials - -LICENSE โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€> Legal terms - โ”‚ - โ””โ”€โ”€> AGPL-3.0 License (copyleft with network use clause) -``` - ---- - -## Version History - -### Current Version: 1.0 (October 31, 2025) -- Initial release -- Complete workflow with 16 nodes -- Comprehensive documentation -- AGPL-3.0 License with trademark protection -- Human-AI collaboration attribution - -### 
Future Versions: -- 1.1: HTML parsing optimization (99% cost savings) -- 1.2: Enhanced error handling -- 2.0: Multi-platform job board support - ---- - -## Maintenance - -### Keep Updated: -- **workflow.json:** As you customize and improve -- **README.md:** If adding new features or optimizations -- **SETUP.md:** If setup process changes -- **CREDENTIALS.md:** If API configurations change - -### Version Control: -- Use git to track changes -- Always export workflow WITHOUT credentials -- Document major changes in README.md -- Keep .gitignore updated for new credential files - ---- - -## Contributing - -While this is a personal bootcamp project, if you want to improve it: - -1. **Fork and customize** for your own use (AGPL-3.0 allows this) -2. **Share improvements** (optional) - open to learning from others -3. **Credit original work** (required by AGPL-3.0) -4. **Keep same license** (AGPL-3.0 copyleft requirement) -5. **Provide source code** if deployed as web service (AGPL-3.0 network use clause) -6. **Don't include credentials** (security best practice) - ---- - -## Support - -### If you're stuck: - -1. **Check troubleshooting sections:** - - SETUP.md โ†’ Troubleshooting - - CREDENTIALS.md โ†’ Credential-specific troubleshooting - -2. **Review documentation:** - - README.md โ†’ Learning journey (see how issues were solved) - - SETUP.md โ†’ Step-by-step guide - -3. **External resources:** - - n8n Community: https://community.n8n.io/ - - n8n Docs: https://docs.n8n.io/ - - OpenAI Docs: https://platform.openai.com/docs - ---- - -## Final Notes - -**This project demonstrates:** -- Modern workflow automation (n8n) -- Human-AI collaboration (strategic human + technical AI) -- Systems thinking (architecture, error handling, optimization) -- Professional documentation (comprehensive but readable) -- Security best practices (credentials, .gitignore, licensing) - -**Key takeaway:** The documentation is as important as the code. 
This file structure ensures anyone can understand, set up, and use the workflow successfully. - ---- - -**Last Updated:** October 31, 2025 -**Author:** Asheesh Ranjan Srivastava -**Project:** OutSkill AI Engineering Bootcamp 2025 - Day 4 -**License:** AGPL-3.0 (see LICENSE file) -**Trademarks:** Quest And Crossfireโ„ข, Aethelgard Academyโ„ข (Filed - awaiting certification) diff --git a/Asheesh_Ranjan_Srivastava/Day-4/LICENSE b/Asheesh_Ranjan_Srivastava/Day-4/LICENSE deleted file mode 100644 index b59425a..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-4/LICENSE +++ /dev/null @@ -1,112 +0,0 @@ -GNU AFFERO GENERAL PUBLIC LICENSE -Version 3, 19 November 2007 - -Copyright (C) 2025 Asheesh Ranjan Srivastava - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
- -============================================================================== - -Automated LinkedIn Job Application System -Email-based workflow automation with AI-powered personalization - -Built for OutSkill AI Engineering Bootcamp 2025 - Day 4 -Quest And Crossfireโ„ข ยฉ 2025 Asheesh Ranjan Srivastava - -============================================================================== - -COPYRIGHT HOLDER RIGHTS: - -As the copyright holder, Asheesh Ranjan Srivastava retains ALL RIGHTS -to use this code in any manner, including: -- Closed-source applications -- Commercial products -- Proprietary derivatives -- Alternative licensing arrangements - -AGPL-3.0 restrictions apply ONLY to derivative works created by others. - -For commercial licensing inquiries or alternative licensing arrangements: -Contact: asheesh.srivastava@questandcrossfire.com - -============================================================================== - -ADDITIONAL NOTICES: - -1. TRADEMARKS - - "Aethelgard Academy" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - "Quest And Crossfire" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - Use of these trademarks requires explicit permission - -2. AI ATTRIBUTION - This software was developed with assistance from: - - Claude Code (Anthropic) for technical implementation and debugging - - Human strategic decisions and quality control by Asheesh Ranjan Srivastava - - This represents human-AI collaboration in modern software development - -3. THIRD-PARTY SERVICES AND APIS - This workflow uses the following third-party services: - - n8n (Fair Code License - Apache 2.0 with Commons Clause) - - Gmail API (Google) - - OpenAI API (GPT-5, GPT-4o-latest, GPT-4o-mini) - - HeyGen API (AI video generation) - - Google Sheets API (Google) - - Each service is subject to its respective terms of service and pricing. - -4. 
BOOTCAMP ATTRIBUTION - This project was created as part of the OutSkill AI Engineering Bootcamp 2025. - Base architecture and concepts provided by OutSkill. - Strategic design, architecture, and implementation by Asheesh Ranjan Srivastava. - -5. DATA REQUIREMENTS - This workflow requires: - - API keys and credentials (not included) - - Personal profile data customization - - Google Sheet IDs and credential references - - Rate limits and costs apply based on API usage - - No warranty provided for production use or data privacy - -6. COPYLEFT NOTICE (AGPL-3.0) - Under AGPL-3.0, any derivative works or modifications must also be - released under AGPL-3.0 or compatible license. - - If you use this code in your project, you must: - - Make your source code available - - License your project under AGPL-3.0 - - Attribute the original work - - State your changes - - If used in a web service, provide source code to users - - AGPL-3.0 NETWORK USE CLAUSE: - If you run a modified version of this software as a web service - (e.g., n8n cloud, self-hosted automation), you MUST make the - complete source code available to users of that service. - -============================================================================== - -For the complete GNU Affero General Public License v3.0 text, see: -https://www.gnu.org/licenses/agpl-3.0.txt - -============================================================================== - -CONTACT: -Author: Asheesh Ranjan Srivastava -Email: asheesh.srivastava@questandcrossfire.com -Brand: Quest And Crossfire -Project: Aethelgard Academy - -โ—‡ Where chaos becomes clarity. Small fixes, big clarity. 
diff --git a/Asheesh_Ranjan_Srivastava/Day-4/README.md b/Asheesh_Ranjan_Srivastava/Day-4/README.md deleted file mode 100644 index d2075c7..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-4/README.md +++ /dev/null @@ -1,469 +0,0 @@ -# Day 4: Automated LinkedIn Job Application System -## n8n Workflow Automation with AI Video Generation - -**Project:** Email-Based Job Application Automation -**Date:** October 31, 2025 -**Author:** Asheesh Ranjan Srivastava -**Collaboration:** Human-AI Partnership (Claude Code for technical implementation) - -[![License: AGPL-3.0](https://img.shields.io/badge/License-AGPL%203.0-blue.svg)](LICENSE) -[![n8n](https://img.shields.io/badge/n8n-Workflow-orange.svg)](https://n8n.io/) -[![OpenAI](https://img.shields.io/badge/OpenAI-GPT--4%20%7C%20GPT--5-blue.svg)](https://openai.com/) -[![HeyGen](https://img.shields.io/badge/HeyGen-AI%20Video-purple.svg)](https://heygen.com/) - -> **๐Ÿ“‹ Update (Oct 31, 2025):** Fixed IF node routing condition. Changed from `notEmpty` to `startsWith "https://"` for proper URL validation. See [TESTING.md](TESTING.md) Test 4 for verification steps. - ---- - -## What This Does - -Automatically processes LinkedIn job alert emails every day at 3 AM: -1. Fetches unread LinkedIn job emails -2. Extracts 4-6 job URLs per email (handles 50+ jobs/day) -3. Scrapes each job page for details -4. Rates job fit (0-5 scale) against my profile -5. Generates personalized cover letters for all jobs -6. Creates AI video scripts for high-match jobs (rating โ‰ฅ4) -7. Generates AI avatar videos via HeyGen API (future feature) -8. Saves everything to Google Sheets for tracking - -**Result:** 20+ personalized job applications daily with zero manual effort. - ---- - -## The Real Learning Journey - -### First Principles Thinking: Why Email Over RSS? - -The bootcamp taught RSS-based job scraping. I chose a different path. 
- -**The Problem with RSS:** -- RSS feeds are generic (LinkedIn shows ALL jobs, not jobs FOR ME) -- No signal about job relevance to my profile -- Delayed updates (hours/days old) -- Can't differentiate quality matches from spam - -**Why Email Was Better:** -- **LinkedIn's pre-filtering:** Email alerts = jobs their algorithm already matched to my profile -- **Leveraging $13B infrastructure:** Their recommendation engine does the hard work for free -- **Real-time signal:** Emails arrive when jobs are posted (early applicants get priority) -- **Quality over quantity:** 6 relevant jobs per email vs. 100 generic RSS entries - -**First Principle:** Don't build what already exists. LinkedIn spent billions building a job matching algorithm. My system uses their work as input, not duplicate it. - -**Systems Thinking:** Better inputs โ†’ better outputs. Garbage in, garbage out. Email alerts are higher-quality input than RSS feeds. - ---- - -## Problem-Solving Journey: 6 Major Iterations - -### Iteration 1: Gmail Parsing Failed (The Big Blocker) -**Problem:** n8n's Gmail node returned emails WITHOUT the actual content -**What I Did:** Tested the node, noticed `payload.parts[]` was missing -**AI's Role:** Suggested changing `simplify: false` and `format: "full"` -**Result:** Still didn't work! Gmail node was filtering the data. - -**Learning:** Sometimes tools have hidden limitations. Need to go deeper. - ---- - -### Iteration 2: Direct Gmail API Bypass -**Problem:** n8n Gmail node couldn't be fixed -**What I Did:** Decided to bypass n8n's node and call Gmail API directly -**AI's Role:** Implemented HTTP Request node with Gmail API endpoint: `format=full` -**Result:** SUCCESS! Got full email payload with `parts[]` array. - -**Learning:** When a tool has limitations, go to the source. APIs > abstractions. 
- ---- - -### Iteration 3: URL Extraction Returned 0 Results -**Problem:** Regex pattern found ZERO LinkedIn job URLs -**What I Did:** -- Added debug logging to see actual email HTML -- Provided Claude with real email snippets -- Validated extraction logic step-by-step - -**AI's Discovery:** LinkedIn uses `/comm/jobs/view/4281253503` in emails, NOT `/jobs/view/`! -**Result:** Changed pattern to `/comm/jobs/view/\d+` โ†’ Found all 6 URLs! - -**Learning:** Don't assume patterns. Debug with real data. The devil is in the details. - ---- - -### Iteration 4: n8n Return Format Error (The Frustrating One) -**Problem:** Error: "A 'json' property isn't an object [item 0]" -**Console:** Showed extraction working perfectly (6 URLs found) -**n8n:** Rejected the output with validation error - -**What Happened:** -- Tried 5+ different return formats -- Switched from Claude Sonnet 4.5 to Opus 4.1 (Sonnet couldn't solve it) -- **Opus found the issue:** Return format must ALWAYS be array with `{json: {...}}` structure -- Even empty cases must return `[{json: {status: 'no_urls'}}]` not `[]` - -**Learning:** When one AI model gets stuck, switch to more capable model. Meta-skill: knowing when to escalate tools. - ---- - -### Iteration 5: IF Node Routing Bug -**Problem:** Extracted 6 valid URLs but ALL went to False branch -**What I Did:** Updated IF condition from `status === "success"` to `jobUrl is not empty` -**Result:** Still failed! All 6 items routed to False. - -**AI's Diagnosis:** n8n's "is not empty" operator was buggy -**Solution Options:** -1. Change to `jobUrl exists` -2. Change to `jobUrl starts with "https://"` -3. **Just delete the IF node** (redundant - extraction already validates) - -**My Decision:** Delete it. Keep it simple. - -**Learning:** Don't over-engineer. If a validation is redundant, remove it. 
- ---- - -### Iteration 6: HTML Parsing Optimization (Deferred) -**Problem:** Sending 350KB HTML to GPT costs $0.30/job ($15-20/day for 50 jobs) -**AI's Proposal:** Add HTML parsing node โ†’ Extract 2KB structured data โ†’ 99% cost savings -**My Decision:** "Let's have a MVP live now" - Defer optimization - -**Why I Deferred:** -- Working system TODAY > perfect system next month -- Need usage data to validate cost is actually a problem -- Can optimize later when I have real metrics - -**Learning:** MVP thinking. Ship first, optimize later. Don't solve imaginary future problems. - ---- - -## What I Actually Learned - -### Technical Skills: -- n8n workflow design (16-node production system) -- Gmail API integration (bypassing platform limitations) -- Base64 decoding (URL-safe format) -- HTML entity decoding -- Regex pattern discovery through debugging -- Multi-stage AI pipeline design -- Error handling and graceful degradation - -### AI Collaboration Skills: -- **Problem identification:** I spotted issues through testing -- **Data provision:** I gave AI real email HTML for debugging -- **Decision-making:** I chose email vs RSS, MVP vs optimization -- **Tool selection:** I switched Sonnet โ†’ Opus when stuck -- **Quality control:** I validated every solution with real data -- **Product thinking:** I decided when to ship vs. 
when to iterate - -### Systems Thinking: -- **Input quality matters:** Email > RSS because higher-quality input -- **Leverage existing systems:** Use LinkedIn's algorithm, don't rebuild it -- **Design for resilience:** Error handling, empty cases, graceful degradation -- **Optimize judiciously:** Don't fix what isn't proven broken yet -- **Conditional processing:** Videos only for โ‰ฅ4/5 matches (quality > quantity) - ---- - -## Architecture Decisions (Strategic, Not Technical) - -### Decision 1: Multi-Model AI Strategy -**My Choice:** Different AI models for different tasks -- GPT-5: Complex extraction (new model, high accuracy, worth cost) -- GPT-4o-latest: Critical decisions (rating, cover letters) -- GPT-4o-mini: Creative tasks (video scripts) - -**Rationale:** Match model capability to task criticality, optimize cost vs. quality - ---- - -### Decision 2: Rating-Based Video Generation -**My Choice:** Only generate videos for jobs rated โ‰ฅ4/5 - -**Rationale:** -- **People-first:** Recruiters don't want 50 generic videos -- **Authentic engagement:** Videos only where there's genuine match -- **Cost optimization:** Save 60% of HeyGen costs (~$6-10/day) -- **Quality signal:** Video demonstrates real interest, not spam - ---- - -### Decision 3: Conditional Workflow Paths -**My Choice:** Error handling at every stage -- No URLs found? โ†’ Skip gracefully -- Low rating? โ†’ Save basic data, no video -- Video processing failed? โ†’ Save without video URL - -**Rationale:** System keeps running even when individual jobs fail. Resilience > fragility. - ---- - -## Human-AI Collaboration Model - -### What I (Asheesh) Did: -โœ… Identified all problems (Gmail parsing, URL extraction, IF routing) -โœ… Made strategic decisions (email vs RSS, MVP-first, multi-model AI) -โœ… Provided real debugging data (email HTML, error screenshots) -โœ… Tested every solution with actual emails -โœ… Decided when to ship vs. 
optimize -โœ… Switched AI tools when needed (Sonnet โ†’ Opus) - -### What AI (Claude Code) Did: -โœ… Implemented Gmail API integration -โœ… Discovered LinkedIn's `/comm/` URL pattern -โœ… Fixed n8n return format issues -โœ… Built extraction and parsing logic -โœ… Debugged technical implementation - -### Why This Matters: -Modern software development IS human-AI collaboration. The skill isn't writing every line yourself - it's: -- Knowing WHAT to build (strategic thinking) -- Identifying WHEN it's broken (problem-solving) -- Validating it WORKS with real data (quality control) -- Deciding WHEN to ship (product thinking) - ---- - -## Key Metrics - -**Development:** -- Total iterations: 6 major problem-solving cycles -- AI models used: 2 (Sonnet 4.5 โ†’ Opus 4.1 for critical bug) -- Development time: ~6-8 hours equivalent -- Code written by AI: ~95% -- Strategic decisions by human: 100% - -**System Performance:** -- Daily capacity: 50+ jobs processed automatically -- High-quality matches: ~20/day (40% match rate at โ‰ฅ4/5) -- Manual time saved: 2-3 hours/day -- Personalization: 100% (each cover letter references specific job details) -- Cost: ~$13/day (50 jobs @ $0.26 avg) -- ROI: Estimated 5-10x higher interview rate vs. 
generic applications - -**Workflow Stats:** -- Total nodes: 16 -- AI nodes: 4 (GPT-5, GPT-4o-latest, GPT-4o-mini) -- Conditional branches: 2 (Rating โ‰ฅ4, Video Complete) -- Error handling paths: 3 -- API integrations: 4 (Gmail, OpenAI, HeyGen, Google Sheets) - ---- - -## What Makes This Portfolio-Worthy (2024+ Standards) - -**Not Just Code:** -- Strategic thinking (email vs RSS decision) -- Problem-solving (6 iterations to working system) -- AI collaboration (effective use of AI for technical execution) -- Product thinking (MVP-first, conditional processing) -- Systems thinking (input quality, error handling, optimization strategy) - -**Real-World Impact:** -- Saves 2-3 hours daily -- Processes 10x more opportunities (50 vs 5 jobs) -- Higher quality matches (LinkedIn pre-filter + my rating system) -- Meta-benefit: The automation itself demonstrates skills employers want! - -**Honest Attribution:** -- Transparent about human-AI collaboration -- Clear about what I contributed (strategy, testing, decisions) -- Clear about what AI contributed (implementation, debugging) -- Demonstrates modern collaboration skills > pretending I wrote everything - ---- - -## Files in This Submission - -### Core Files - -**`workflow.json`** - Complete n8n workflow (860 lines, 16 nodes) ready to import -- Updated Oct 31, 2025: Fixed IF node routing condition - -**`README.md`** - This file, complete learning journey and documentation - -### Documentation Files - -**`SETUP.md`** - Step-by-step setup guide for all components - -**`CREDENTIALS.md`** - Detailed credential configuration for Gmail, OpenAI, HeyGen, Google Sheets - -**`TESTING.md`** - Comprehensive testing guide with 15 test cases -- Verify all components work correctly -- Troubleshooting for each stage -- Production readiness checklist - -**`FILE_SUMMARY.md`** - Quick overview of all files and their purpose - -**`LICENSE`** - AGPL-3.0 License with trademark notices and AI attribution - -**`.gitignore`** - Security protection 
(prevents credential commits) - -**Note:** You'll need to configure: -- Gmail OAuth2 credentials -- OpenAI API key -- HeyGen API key (optional) -- Google Sheets ID - ---- - -## How to Use This Workflow - -1. **Read Documentation** - - Start with this README for context - - Review [SETUP.md](SETUP.md) for detailed setup steps - - Check [CREDENTIALS.md](CREDENTIALS.md) for credential configuration - -2. **Import & Configure** - - Import `workflow.json` into n8n - - Configure credentials (Gmail, OpenAI, HeyGen, Google Sheets) - - Update Google Sheet ID in "Save to Sheet" nodes - - Customize profile data in AI prompts - -3. **Test & Verify** - - Follow [TESTING.md](TESTING.md) for comprehensive 15-step testing - - Verify IF node routes URLs correctly (Test 4) - - Test with 1-2 emails first - - Confirm all components work end-to-end - -4. **Activate** - - Enable daily 3 AM execution - - Monitor first few runs - - Check costs and adjust as needed - ---- - -## Future Optimizations (Identified, Deferred) - -### Priority 1: HTML Parsing (99% cost savings) -- Current: 350KB HTML โ†’ GPT ($0.30/job) -- Optimized: 350KB โ†’ Parse โ†’ 2KB โ†’ GPT ($0.01/job) -- Savings: $10-15/day -- When: If daily cost exceeds $20-30 - -### Priority 2: Video Generation Pipeline -- HeyGen video generation working but needs testing -- Status polling loop functional -- Need to validate video quality and turnaround time - -### Priority 3: Duplicate Detection -- Current: Check Google Sheets before processing -- Optimization: Hash-based deduplication -- Benefit: Faster lookups at scale - ---- - -## Reflections: What I'd Do Differently - -**If I started over knowing what I know now:** - -1. **Go to Gmail API first:** Would have saved 2 iterations debugging n8n node -2. **Test with real data immediately:** Found `/comm/` pattern faster with real emails -3. **Ask about n8n return format quirks:** Could have prevented format error -4. 
**Ship faster:** Deferred optimization sooner (MVP thinking) - -**But honestly:** The iterations taught me MORE than a perfect path would have. Debugging is where real learning happens. - ---- - -## Conclusion - -This isn't just a workflow. It's a case study in: -- **First principles thinking** (email > RSS) -- **Systems thinking** (input quality, error handling, optimization strategy) -- **Problem-solving** (6 iterations, tool switching, real data debugging) -- **AI collaboration** (strategic human + technical AI) -- **Product thinking** (MVP > perfection) - -I didn't just learn n8n. I learned how to architect systems, collaborate with AI effectively, and ship real products in the AI era. - -**That's the skill employers actually want in 2024+.** - ---- - -## ๐Ÿ“„ License - -This project is licensed under the **GNU Affero General Public License v3.0 (AGPL-3.0)** - see the [LICENSE](LICENSE) file for details. - -**Key Points:** -- Open source under AGPL-3.0 -- Trademarks: "Aethelgard Academy" and "Quest And Crossfire" (Trademark Filed - awaiting certification) -- Network use clause: Must provide source code if deployed as web service - -**What this means:** -- โœ… You can use, modify, and distribute this workflow -- โœ… You can use it for personal or commercial projects -- โœ… You must keep the same AGPL-3.0 license -- โš ๏ธ If used in a web service, you MUST make the complete source code available to users -- โš ๏ธ You must credit Quest And Crossfireโ„ข -- โš ๏ธ You cannot use Quest And Crossfireโ„ข branding -- โš ๏ธ API keys and credentials are NOT included (you must provide your own) - -**Bootcamp Context:** -- Created as part of the OutSkill AI Engineering Bootcamp 2025 (Day 4) -- Represents human-AI collaboration in modern software development -- Strategic design and architecture: Asheesh Ranjan Srivastava -- Technical implementation: AI assistance (Claude Code by Anthropic) - -**Third-Party Services:** -- Gmail API, OpenAI API, HeyGen API, Google Sheets 
API -- Each service has its own terms of service and pricing -- Rate limits and API costs apply based on usage - ---- - -**Author:** Asheesh Ranjan Srivastava -**Technical Partner:** Claude Code (AI) -**Date:** October 31, 2025 -**OutSkill AI Engineering Bootcamp 2025 - Day 4** - ---- - -## Appendix: Technical Details - -### Email Processing Flow -``` -Gmail Alert arrives - โ†“ -Fetch full email via Gmail API (format=full) - โ†“ -Decode base64 body (URL-safe format) - โ†“ -Extract URLs matching `/comm/jobs/view/\d+` - โ†“ -Deduplicate job IDs - โ†“ -Return array: [{json: {jobUrl: "https://...", emailId: "...", emailDate: "..."}}] -``` - -### URL Extraction Pattern Discovery -``` -Expected: /jobs/view/4281253503 -Actual: /comm/jobs/view/4281253503 โ† LinkedIn's internal format for emails! -``` - -### n8n Return Format Requirement -```javascript -// โŒ WRONG - Causes validation error -return []; - -// โŒ WRONG - Inconsistent types -if (error) return {json: {error: true}}; -return [{json: {url: "..."}}]; - -// โœ… CORRECT - Always array with json property -if (error) return [{json: {status: 'error'}}]; -return [{json: {jobUrl: "...", emailId: "...", emailDate: "..."}}]; -``` - -### Multi-Model AI Strategy -| Task | Model | Cost/1M tokens | Rationale | -|------|-------|----------------|-----------| -| Extract Job Data | GPT-5 | $2.50 | Complex HTML parsing, high accuracy critical | -| Rate Job Fit | GPT-4o-latest | $2.50 | Critical decision point, affects video generation | -| Cover Letter | GPT-4o-latest | $2.50 | Professional writing quality matters | -| Video Script | GPT-4o-mini | $0.15 | Creative task, lower stakes, cost-optimize | - ---- - -**End of README** diff --git a/Asheesh_Ranjan_Srivastava/Day-4/SETUP.md b/Asheesh_Ranjan_Srivastava/Day-4/SETUP.md deleted file mode 100644 index e32a34a..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-4/SETUP.md +++ /dev/null @@ -1,409 +0,0 @@ -# Setup Guide: LinkedIn Job Automation Workflow - -Complete step-by-step guide to 
set up and run the automated LinkedIn job application system. - ---- - -## Prerequisites - -### Required Accounts: -1. **n8n** (Cloud or Self-hosted) - - Cloud: https://n8n.io/cloud (recommended for beginners) - - Self-hosted: https://docs.n8n.io/hosting/ - -2. **Gmail Account** - - Enable LinkedIn job alerts emails - - Configure Gmail API access (see below) - -3. **OpenAI Account** - - Sign up: https://platform.openai.com/ - - API key with credits - - Models needed: GPT-5, GPT-4o-latest, GPT-4o-mini - -4. **HeyGen Account** (Optional for video generation) - - Sign up: https://heygen.com/ - - API access required - -5. **Google Sheets** - - Google account with Sheets access - - Create a tracking sheet (see below) - ---- - -## Step 1: Set Up n8n - -### Option A: n8n Cloud (Easiest) -1. Go to https://n8n.io/cloud -2. Sign up for an account -3. Create a new workspace -4. Note your workspace URL - -### Option B: Self-Hosted (Advanced) -```bash -# Using Docker -docker run -it --rm \ - --name n8n \ - -p 5678:5678 \ - -v ~/.n8n:/home/node/.n8n \ - n8nio/n8n - -# Or using npm -npm install n8n -g -n8n start -``` - ---- - -## Step 2: Configure Gmail API - -### Enable Gmail API: -1. Go to [Google Cloud Console](https://console.cloud.google.com/) -2. Create a new project or select existing -3. Enable Gmail API: - - APIs & Services โ†’ Library - - Search "Gmail API" - - Click Enable - -4. Create OAuth 2.0 Credentials: - - APIs & Services โ†’ Credentials - - Create Credentials โ†’ OAuth 2.0 Client ID - - Application type: Web application - - Add redirect URI: `https://YOUR_N8N_URL/rest/oauth2-credential/callback` - -5. Note your Client ID and Client Secret - -### Connect to n8n: -1. In n8n: Credentials โ†’ Add Credential -2. Select "Gmail OAuth2 API" -3. Enter Client ID and Client Secret -4. Click "Connect my account" -5. Authorize access - ---- - -## Step 3: Configure OpenAI API - -1. 
Get API Key: - - Go to https://platform.openai.com/api-keys - - Click "Create new secret key" - - Copy the key (you won't see it again!) - -2. Add to n8n: - - Credentials โ†’ Add Credential - - Select "OpenAI API" - - Paste API key - - Save - -3. Verify Access: - - Ensure you have credits - - Check rate limits: https://platform.openai.com/account/limits - ---- - -## Step 4: Configure HeyGen API (Optional) - -1. Get API Key: - - Sign up at https://heygen.com/ - - Go to Account โ†’ API - - Generate API key - -2. Add to n8n: - - Credentials โ†’ Add Credential - - Select "HTTP Header Auth" - - Header Name: `X-Api-Key` - - Value: Your HeyGen API key - - Save as "heygenApiKey" - -**Note:** Video generation is optional. Workflow will work without HeyGen, just won't generate videos. - ---- - -## Step 5: Set Up Google Sheets - -### Create Tracking Sheet: -1. Go to [Google Sheets](https://sheets.google.com/) -2. Create new spreadsheet -3. Name it: "LinkedIn Job Applications Tracker" -4. Create columns (exact names): - - Title - - Job Description - - Link - - Date - - Rating - - Company Name - - Benefits - - Location - - Match Explanation - - Cover Letter - - Video Script - - Video URL - -5. Copy the Sheet ID from URL: - ``` - https://docs.google.com/spreadsheets/d/YOUR_SHEET_ID_HERE/edit - ``` - -### Connect to n8n: -1. Credentials โ†’ Add Credential -2. Select "Google Sheets OAuth2 API" -3. Follow OAuth flow to authorize -4. Save - ---- - -## Step 6: Import Workflow - -1. **Download workflow.json** from this repository -2. In n8n: - - Click "+" โ†’ Import from File - - Select `workflow.json` - - Click Import - -3. 
**Update Credentials:** - - Click each node that needs credentials - - Select your configured credentials from dropdown - - Nodes needing credentials: - - "Get LinkedIn Email IDs" โ†’ Gmail OAuth2 - - "Fetch Full Email via API" โ†’ Gmail OAuth2 - - "Extract Job Data" โ†’ OpenAI API - - "Rate Job Match" โ†’ OpenAI API - - "Generate Cover Letter" โ†’ OpenAI API - - "Create Video Script" โ†’ OpenAI API - - "Generate Video (HeyGen)" โ†’ HeyGen API (optional) - - "Check Video Status" โ†’ HeyGen API (optional) - - "Save to Google Sheet" โ†’ Google Sheets OAuth2 - - "Save to Sheet (No Video)" โ†’ Google Sheets OAuth2 - - "Mark Email as Read" โ†’ Gmail OAuth2 - ---- - -## Step 7: Customize Profile Data - -**IMPORTANT:** The workflow includes MY profile data. You MUST update it with YOUR information. - -### Edit "Rate Job Match" Node: -1. Click on "Rate Job Match" node -2. Find the "Candidate Profile" section in the prompt -3. Replace with YOUR: - - Name - - Skills (technical and soft skills) - - Experience level - - Target roles - - Salary expectations - - Location preferences - - Recent projects - - Education - - Preferences (remote/hybrid, company types, etc.) - -### Edit "Generate Cover Letter" Node: -1. Click on "Generate Cover Letter" node -2. Update candidate information in prompt -3. Update recent achievements and projects - -### Edit "Create Video Script" Node: -1. Click on "Create Video Script" node -2. Update candidate name and key achievements - ---- - -## Step 8: Update Google Sheet ID - -1. Open "Save to Sheet (No Video)" node -2. Click on "Document ID" field -3. Replace with YOUR Google Sheet ID -4. Save - -Repeat for "Save to Google Sheet" node if used. - ---- - -## Step 9: Configure Gmail Filter - -### Update Email Query: -1. Click "Get LinkedIn Email IDs" node -2. Update filters: - - `sender`: Keep as `jobs-listings@linkedin.com` - - `receivedAfter`: Change to your desired start date - -### Enable LinkedIn Job Alerts: -1. Go to LinkedIn โ†’ Jobs -2. 
Set up job alerts for roles you want -3. Configure to email you daily/weekly -4. Alerts will come from `jobs-listings@linkedin.com` - ---- - -## Step 10: Test the Workflow - -### Test with 1 Email First: - -1. **Manual Test:** - - Click "Execute workflow" button - - Check each node output - - Verify data is correct - -2. **Check Results:** - - Look at Google Sheet - data should appear - - Verify cover letter quality - - Check if rating makes sense - -3. **Debug Issues:** - - Click on any red node to see error - - Check credentials are connected - - Verify API keys have credits - - Check Sheet ID is correct - -### Common Issues: - -**No URLs Found:** -- Check Gmail query is correct -- Verify LinkedIn sends job alerts to this email -- Check emails have `/comm/jobs/view/` links - -**Extraction Fails:** -- Verify OpenAI API key has credits -- Check rate limits not exceeded -- Ensure GPT models are accessible - -**Sheet Write Fails:** -- Verify Sheet ID is correct -- Check column names match exactly -- Ensure Google Sheets credentials authorized - ---- - -## Step 11: Activate Automation - -### Set Schedule: -1. Click "Schedule (Daily 3 AM)" node -2. Adjust cron expression if needed: - - Current: `0 3 * * *` (3 AM daily) - - Change time if desired (e.g., `0 9 * * *` for 9 AM) - -### Activate Workflow: -1. Click "Active" toggle (top right) -2. Workflow will now run automatically -3. Check execution history to monitor runs - ---- - -## Step 12: Monitor and Optimize - -### Check Daily: -- Review execution log -- Check Google Sheet for new entries -- Verify cover letters are high quality -- Adjust profile data if matches are poor - -### Optimize Over Time: -1. **Adjust Rating Criteria:** - - If too many low ratings: Make criteria more lenient - - If too many high ratings: Make criteria stricter - -2. **Refine Prompts:** - - Improve cover letter quality - - Adjust video script tone - - Tune job description extraction - -3. 
**Add HTML Parsing:** (Future optimization) - - Reduces costs by 99% - - See README.md for details - ---- - -## Cost Estimation - -### Daily Costs (50 jobs): -- **OpenAI GPT-5:** ~$5-7 (extraction) -- **OpenAI GPT-4o-latest:** ~$5-7 (rating + cover letters) -- **OpenAI GPT-4o-mini:** ~$0.50 (video scripts) -- **HeyGen Videos:** ~$3-6 (20 videos at โ‰ฅ4 rating) -- **Total:** ~$13-20/day - -### Cost Optimization: -- **Free tier:** Use OpenAI free credits initially -- **Reduce jobs:** Filter emails more strictly -- **Skip videos:** Disable HeyGen for now -- **HTML parsing:** Implement to save 99% extraction cost - ---- - -## Troubleshooting - -### Workflow Not Running: -- [ ] Check "Active" toggle is ON -- [ ] Verify schedule trigger is configured -- [ ] Check n8n has internet access -- [ ] Review execution log for errors - -### No Emails Processed: -- [ ] Confirm LinkedIn sends alerts to Gmail -- [ ] Check Gmail query filters -- [ ] Verify OAuth not expired -- [ ] Check email date range - -### Extraction Errors: -- [ ] Verify OpenAI API key valid -- [ ] Check API credits remain -- [ ] Ensure rate limits not hit -- [ ] Review error messages in node output - -### Sheet Write Errors: -- [ ] Verify Sheet ID correct -- [ ] Check column names exact match -- [ ] Ensure OAuth authorized -- [ ] Check sheet not protected/locked - ---- - -## Security Best Practices - -### Protect Your Credentials: -- โŒ Never commit API keys to git -- โŒ Never share workflow with credentials -- โœ… Use n8n's credential encryption -- โœ… Export workflow without credentials -- โœ… Rotate API keys periodically - -### Data Privacy: -- Your profile data is in the workflow -- Cover letters contain personal information -- Google Sheet has application history -- Consider privacy when sharing - ---- - -## Next Steps - -1. โœ… Complete all setup steps above -2. โœ… Test with 1-2 emails first -3. โœ… Customize profile data thoroughly -4. โœ… Review and adjust rating criteria -5. 
โœ… Activate workflow for daily automation -6. ๐Ÿ“ˆ Monitor results and optimize over time - ---- - -## Getting Help - -### Resources: -- **n8n Docs:** https://docs.n8n.io/ -- **n8n Community:** https://community.n8n.io/ -- **OpenAI Docs:** https://platform.openai.com/docs -- **Gmail API Docs:** https://developers.google.com/gmail/api - -### Common Questions: -See [CREDENTIALS.md](CREDENTIALS.md) for detailed credential setup. -See [README.md](README.md) for project overview and learning journey. - ---- - -**Setup completed?** You should now have a fully functional automated job application system! - -**Need help?** Check the troubleshooting section above or refer to n8n community forums. - ---- - -**Last Updated:** October 31, 2025 -**Author:** Asheesh Ranjan Srivastava -**Project:** OutSkill AI Engineering Bootcamp 2025 - Day 4 diff --git a/Asheesh_Ranjan_Srivastava/Day-4/TESTING.md b/Asheesh_Ranjan_Srivastava/Day-4/TESTING.md deleted file mode 100644 index 83fad16..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-4/TESTING.md +++ /dev/null @@ -1,597 +0,0 @@ -# Testing & Verification Guide - -Complete testing checklist for the LinkedIn Job Automation workflow. - ---- - -## Overview - -This guide provides step-by-step instructions to test and verify all components of the workflow after setup. 
- -**Recent Fix (October 31, 2025):** -- โœ… Fixed IF node "Check Extraction Status" condition -- Changed from `notEmpty` operation to `startsWith "https://"` -- This ensures valid LinkedIn URLs route to scraping, while error cases route to handling - ---- - -## Pre-Testing Checklist - -Before running tests, ensure: - -- [ ] All credentials configured in n8n (Gmail, OpenAI, HeyGen, Google Sheets) -- [ ] Google Sheet created with correct column structure -- [ ] Sheet ID updated in workflow nodes -- [ ] Profile data customized in AI prompts -- [ ] Workflow imported and saved in n8n - ---- - -## Test 1: Email Fetching - -**Goal:** Verify Gmail integration works and fetches LinkedIn job alerts - -### Steps: - -1. Open n8n workflow -2. Click "Get LinkedIn Email IDs" node -3. Click "Execute Node" button -4. Expected output: - ```json - [ - { - "id": "18f1234567890abcd", - "threadId": "18f1234567890abcd" - } - ] - ``` - -### Success Criteria: -- โœ… Returns list of email IDs -- โœ… No authentication errors -- โœ… Emails are from `jobs-listings@linkedin.com` - -### Troubleshooting: -- **No emails found:** Check Gmail filters, ensure you have LinkedIn job alerts -- **Auth error:** Re-authorize Gmail OAuth in credentials -- **Rate limit:** Wait 1 minute and retry - ---- - -## Test 2: Full Email Retrieval - -**Goal:** Verify Gmail API returns complete email HTML - -### Steps: - -1. Click "Fetch Full Email via API" node -2. Click "Execute Node" -3. Check output has `payload.parts[0].body.data` field -4. Expected output structure: - ```json - { - "payload": { - "parts": [ - { - "body": { - "data": "PGh0bWw-..." 
// Base64 encoded HTML - } - } - ] - } - } - ``` - -### Success Criteria: -- โœ… Returns full email payload -- โœ… `payload.parts` array exists -- โœ… `body.data` contains base64-encoded content - -### Troubleshooting: -- **Missing parts array:** Verify using HTTP Request node (not Gmail node) -- **Empty data:** Check Gmail credential has read permissions - ---- - -## Test 3: URL Extraction โš ๏ธ CRITICAL - -**Goal:** Verify LinkedIn job URLs are extracted from email HTML - -### Steps: - -1. Click "Extract Job URLs" node -2. Click "Execute Node" -3. Expected output (4-6 items): - ```json - [ - { - "json": { - "jobUrl": "https://www.linkedin.com/jobs/view/4318243991" - } - }, - { - "json": { - "jobUrl": "https://www.linkedin.com/jobs/view/4318567890" - } - } - ] - ``` - -### Success Criteria: -- โœ… Returns 4-6 unique job URLs per email -- โœ… URLs match pattern: `https://www.linkedin.com/jobs/view/{job_id}` -- โœ… No duplicate URLs - -### Troubleshooting: -- **0 URLs found:** Email might be promotional, try another email -- **Returns status: 'no_urls':** This is correct behavior for emails without jobs -- **Duplicates:** Code should deduplicate, check Extract Job URLs code - ---- - -## Test 4: IF Node Routing โš ๏ธ FIXED - -**Goal:** Verify IF node correctly routes valid URLs to scraping - -### Steps: - -1. Click "Check Extraction Status" node -2. Click "Execute Node" -3. 
Check routing: - - **True branch (green):** Items with valid `jobUrl` starting with "https://" - - **False branch (red):** Items with `status: 'no_urls'` - -### Success Criteria: -- โœ… Valid URLs go to True branch โ†’ "Scrape Job Page" -- โœ… Error objects go to False branch โ†’ "Handle No URLs" -- โœ… All 6 URLs (if extracted) route to True branch - -### What Changed: -**Before (broken):** -```json -{ - "operation": "notEmpty", - "rightValue": "true" // Incorrect -} -``` - -**After (fixed):** -```json -{ - "operation": "startsWith", - "rightValue": "https://" // Correct -} -``` - -### Troubleshooting: -- **All items go to False:** Re-import updated workflow.json with fix -- **Mixed routing:** This is correct if some items have no URLs - ---- - -## Test 5: Job Page Scraping - -**Goal:** Verify LinkedIn job pages are scraped successfully - -### Steps: - -1. Click "Scrape Job Page" node -2. Click "Execute Node" -3. Expected output: Large HTML text (200-350 KB) -4. Check for LinkedIn page content - -### Success Criteria: -- โœ… Returns HTML content (very large text) -- โœ… No 404 errors -- โœ… No rate limiting errors - -### Troubleshooting: -- **404 Not Found:** Job posting may have been removed -- **403 Forbidden:** LinkedIn may be blocking requests (use VPN or wait) -- **Timeout:** Increase timeout in node settings (currently 30 seconds) - ---- - -## Test 6: GPT Extraction - -**Goal:** Verify GPT-5 extracts job details from HTML - -### Steps: - -1. Click "Extract Job Data" node -2. Click "Execute Node" -3. 
Expected output: - ```json - { - "message": { - "content": { - "company_name": "TELUS Digital", - "job_title": "AI Product Manager", - "benefits": "401K, Medical, Remote work", - "job_description": "Lead AI product development...", - "location": "Remote", - "salary_range": "$90,000 - $120,000" - } - } - } - ``` - -### Success Criteria: -- โœ… Returns structured JSON -- โœ… All fields populated (or empty string if not found) -- โœ… job_description is concise (200 chars or less) - -### Troubleshooting: -- **OpenAI API error:** Check API key and credits -- **Empty fields:** Acceptable if job posting doesn't include that info -- **Rate limit:** Wait 60 seconds and retry - ---- - -## Test 7: Job Rating - -**Goal:** Verify GPT-4o-latest rates job fit (0-5 scale) - -### Steps: - -1. Click "Rate Job Match" node -2. Click "Execute Node" -3. Expected output: - ```json - { - "message": { - "content": { - "rating": 4, - "explanation": "Strong match due to AI/ML focus and remote option. Experience level aligns well with 6+ years background." - } - } - } - ``` - -### Success Criteria: -- โœ… Rating is number between 0-5 -- โœ… Explanation provides clear reasoning -- โœ… Explanation mentions specific profile matches - -### Troubleshooting: -- **Low ratings for good jobs:** Update candidate profile in prompt to be more accurate -- **High ratings for bad jobs:** Adjust scoring criteria in system prompt - ---- - -## Test 8: Cover Letter Generation - -**Goal:** Verify GPT-4o-latest creates personalized cover letters - -### Steps: - -1. Click "Generate Cover Letter" node -2. Click "Execute Node" -3. Expected output: - ```json - { - "message": { - "content": { - "cover_letter": "Dear Hiring Manager,\n\nI was excited to discover..." 
- } - } - } - ``` - -### Success Criteria: -- โœ… Cover letter is 300-400 words -- โœ… Mentions specific company and role -- โœ… References Quest & Crossfire -- โœ… Professional but conversational tone -- โœ… Not generic (avoid "I am writing to apply") - -### Troubleshooting: -- **Too generic:** Update system prompt with more specific instructions -- **Too long:** Adjust word count limit in prompt - ---- - -## Test 9: Video Script Creation - -**Goal:** Verify GPT-4o-mini creates 45-second video scripts - -### Steps: - -1. Click "Create Video Script" node -2. Click "Execute Node" -3. Expected output: - ```json - { - "message": { - "content": { - "video_script": "Hi, I'm Asheesh. I noticed your AI Product Manager role..." - } - } - } - ``` - -### Success Criteria: -- โœ… Script is approximately 45 seconds when read aloud -- โœ… Sounds natural and conversational -- โœ… Includes hook, match, proof, CTA structure -- โœ… Mentions specific company/role - ---- - -## Test 10: Rating-Based Video Generation โš ๏ธ OPTIONAL - -**Goal:** Verify HeyGen generates videos only for rating โ‰ฅ4 - -### Steps: - -1. Ensure job has rating โ‰ฅ4 from Test 7 -2. Click "IF Rating >= 4" node -3. Check routing: - - **True:** Rating is 4 or 5 โ†’ Goes to "Generate Video (HeyGen)" - - **False:** Rating is 0-3 โ†’ Skips video generation - -### Success Criteria: -- โœ… High-rated jobs (4-5) go to video generation -- โœ… Low-rated jobs (0-3) skip video generation -- โœ… Saves costs by not generating videos for poor matches - ---- - -## Test 11: HeyGen Video Generation โš ๏ธ REQUIRES CREDITS - -**Goal:** Verify HeyGen API creates AI avatar videos - -### Prerequisites: -- HeyGen API key configured -- HeyGen account has sufficient credits (~$0.30 per video) -- Rating โ‰ฅ4 job (from Test 10) - -### Steps: - -1. Click "Generate Video (HeyGen)" node -2. Click "Execute Node" -3. 
Expected output: - ```json - { - "code": 100, - "data": { - "video_id": "abc123xyz789" - } - } - ``` - -### Success Criteria: -- โœ… Returns video_id -- โœ… No authentication errors -- โœ… No insufficient credits error - -### Troubleshooting: -- **Invalid API key:** Check credential name is exactly `heygenApiKey` -- **Insufficient credits:** Add credits to HeyGen account -- **Skip this test:** Delete video nodes if not using HeyGen - ---- - -## Test 12: Video Status Check - -**Goal:** Verify video generation completes successfully - -### Steps: - -1. Wait 30 seconds (automatic via "Wait 30 Seconds" node) -2. Click "Check Video Status" node -3. Expected output: - ```json - { - "data": { - "status": "completed", - "video_url": "https://heygen-asset.s3.amazonaws.com/..." - } - } - ``` - -### Success Criteria: -- โœ… Status is "completed" -- โœ… video_url is present -- โœ… Video URL is accessible - -### Troubleshooting: -- **Status: "pending":** Video still processing, wait another 30 seconds -- **Status: "failed":** Check HeyGen dashboard for error details -- **No video_url:** Generation failed, check script content - ---- - -## Test 13: Google Sheets Saving - -**Goal:** Verify job data saves to Google Sheet - -### Steps: - -1. Click "Save to Sheet (No Video)" node (for jobs without videos) -2. OR click "Save to Google Sheet" node (for jobs with videos) -3. Click "Execute Node" -4. Open Google Sheet and verify new row appears - -### Expected Sheet Row: -| Title | Job Description | Link | Date | Rating | Company Name | Benefits | Location | Match Explanation | Cover Letter | Video Script | Video URL | -|-------|----------------|------|------|--------|--------------|----------|----------|------------------|--------------|--------------|-----------| -| AI Product Manager | Lead AI... | https://... | [auto] | 4 | TELUS Digital | 401K, Medical | Remote | Strong match... | Dear Hiring... | Hi, I'm Asheesh... 
| [URL or empty] | - -### Success Criteria: -- โœ… New row appears in sheet -- โœ… All fields populated correctly -- โœ… No duplicate rows (based on Title matching) -- โœ… Cover letter and video script are readable - -### Troubleshooting: -- **Permission denied:** Re-authorize Google Sheets OAuth -- **Sheet not found:** Verify Sheet ID in node configuration -- **Column mismatch:** Ensure column names match exactly -- **Duplicates:** Check "Title" is set as matching column - ---- - -## Test 14: Email Marking as Read - -**Goal:** Verify Gmail email is marked as read after processing - -### Steps: - -1. Click "Mark Email as Read" node -2. Click "Execute Node" -3. Check Gmail inbox - email should be marked as read - -### Success Criteria: -- โœ… Email shows as read in Gmail -- โœ… No errors - ---- - -## Test 15: End-to-End Workflow Test โš ๏ธ COMPREHENSIVE - -**Goal:** Run complete workflow from start to finish - -### Steps: - -1. Ensure you have 1-2 unread LinkedIn job alert emails in Gmail -2. Click workflow title at top -3. Click "Execute Workflow" button (NOT individual nodes) -4. Wait for all nodes to complete (3-5 minutes) -5. 
Check execution path: - - Schedule โ†’ Get Emails โ†’ Fetch Full Email โ†’ Extract URLs โ†’ Check Status - - โ†’ Scrape โ†’ Extract โ†’ Rate โ†’ Cover Letter โ†’ Video Script - - โ†’ Save to Sheet โ†’ Mark as Read - -### Success Criteria: -- โœ… All nodes execute without errors -- โœ… 4-6 jobs processed per email -- โœ… Google Sheet has new rows -- โœ… High-rated jobs (โ‰ฅ4) generate videos (if HeyGen enabled) -- โœ… Emails marked as read - -### Execution Time: -- **Without videos:** ~2-3 minutes for 6 jobs -- **With videos:** ~5-7 minutes for 6 jobs - -### Troubleshooting: -- **Stops at Extract URLs:** Check email has LinkedIn job postings -- **Stops at Scrape:** LinkedIn may be rate limiting, try VPN -- **Stops at Extract Job Data:** Check OpenAI API credits -- **Stops at video generation:** Check HeyGen credits -- **Sheet not updated:** Check Google Sheets credential - ---- - -## Cost Monitoring - -### Expected Costs (Per Run): - -**OpenAI API (50 jobs):** -- GPT-5 extraction: $5-7/day -- GPT-4o-latest (rating + cover): $5-7/day -- GPT-4o-mini (video scripts): $0.50/day -- **Total:** ~$10-15/day - -**HeyGen API (20 videos):** -- 45-second videos: $0.25-0.35 each -- **Total:** ~$5-7/day - -**Total Daily Cost:** $15-22 for 50 jobs/day - -### Monitor Usage: -- **OpenAI:** https://platform.openai.com/usage -- **HeyGen:** Check account dashboard -- **Gmail/Sheets:** Free (no cost) - ---- - -## Production Readiness Checklist - -Before enabling daily automation: - -- [ ] All 15 tests passed successfully -- [ ] IF node routing works correctly (Test 4) -- [ ] Cost monitoring set up -- [ ] Google Sheet organized and accessible -- [ ] Profile data in prompts is accurate -- [ ] Spending limits set on OpenAI and HeyGen -- [ ] Workflow schedule set to appropriate time (3 AM default) -- [ ] Error handling tested (no emails, no URLs, failed scraping) - ---- - -## Known Issues & Limitations - -### Current Known Issues: -1. 
โœ… **IF node routing (FIXED):** Changed from `notEmpty` to `startsWith` operation -2. **LinkedIn rate limiting:** May occur with high volume, use VPN if needed -3. **Job post removal:** Some URLs may 404 if posting was removed - -### Future Optimizations: -1. **HTML parsing:** Reduce GPT extraction cost by 99% (350KB โ†’ 2KB) -2. **Retry logic:** Auto-retry failed API calls -3. **Batch processing:** Process multiple emails in parallel - ---- - -## Getting Help - -### Resources: -- **Setup Issues:** See [SETUP.md](SETUP.md) -- **Credential Issues:** See [CREDENTIALS.md](CREDENTIALS.md) -- **Workflow Issues:** See [README.md](README.md) learning journey - -### External Support: -- **n8n Community:** https://community.n8n.io/ -- **OpenAI Support:** https://help.openai.com/ -- **HeyGen Support:** support@heygen.com - ---- - -## Test Results Template - -Use this to track your testing progress: - -```markdown -## Testing Session: [Date] - -### Environment: -- n8n: [Cloud/Self-hosted] -- Test emails: [Number] -- OpenAI credits: $[Amount] -- HeyGen credits: $[Amount] - -### Test Results: -- [ ] Test 1: Email Fetching - [Pass/Fail] -- [ ] Test 2: Full Email Retrieval - [Pass/Fail] -- [ ] Test 3: URL Extraction - [Pass/Fail] -- [ ] Test 4: IF Node Routing - [Pass/Fail] โ† RECENTLY FIXED -- [ ] Test 5: Job Page Scraping - [Pass/Fail] -- [ ] Test 6: GPT Extraction - [Pass/Fail] -- [ ] Test 7: Job Rating - [Pass/Fail] -- [ ] Test 8: Cover Letter - [Pass/Fail] -- [ ] Test 9: Video Script - [Pass/Fail] -- [ ] Test 10: Rating IF Node - [Pass/Fail] -- [ ] Test 11: HeyGen Video - [Pass/Fail/Skipped] -- [ ] Test 12: Video Status - [Pass/Fail/Skipped] -- [ ] Test 13: Google Sheets - [Pass/Fail] -- [ ] Test 14: Mark as Read - [Pass/Fail] -- [ ] Test 15: End-to-End - [Pass/Fail] - -### Issues Found: -[List any issues] - -### Cost Incurred: -- OpenAI: $[Amount] -- HeyGen: $[Amount] - -### Notes: -[Any additional observations] -``` - ---- - -**Last Updated:** October 31, 2025 (IF node 
fix applied) -**Author:** Asheesh Ranjan Srivastava -**Project:** OutSkill AI Engineering Bootcamp 2025 - Day 4 diff --git a/Asheesh_Ranjan_Srivastava/Day-4/workflow.json b/Asheesh_Ranjan_Srivastava/Day-4/workflow.json deleted file mode 100644 index ac08dab..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-4/workflow.json +++ /dev/null @@ -1,859 +0,0 @@ -{ - "name": "Asheesh_Email_Video_Job_Automation_OPTIMIZED", - "nodes": [ - { - "parameters": { - "rule": { - "interval": [ - { - "field": "cronExpression", - "expression": "0 3 * * *" - } - ] - } - }, - "type": "n8n-nodes-base.scheduleTrigger", - "typeVersion": 1.2, - "position": [ - -2928, - 112 - ], - "id": "b09b5405-8142-444a-884e-7875c03a5370", - "name": "Schedule (Daily 3 AM)" - }, - { - "parameters": { - "operation": "getAll", - "filters": { - "receivedAfter": "2025-10-31T05:30:05", - "sender": "jobs-listings@linkedin.com" - } - }, - "type": "n8n-nodes-base.gmail", - "typeVersion": 2.1, - "position": [ - -2720, - 112 - ], - "id": "ff5a656e-a3e8-47c2-864c-ecd19b827fc7", - "name": "Get LinkedIn Email IDs", - "webhookId": "360d39b3-3049-4366-ad10-d7e48504ee8d", - "credentials": { - "gmailOAuth2": { - "id": "pdqiqmxTBB5BaN3c", - "name": "Gmail account" - } - } - }, - { - "parameters": { - "url": "=https://gmail.googleapis.com/gmail/v1/users/me/messages/{{ $json.id }}?format=full", - "authentication": "predefinedCredentialType", - "nodeCredentialType": "gmailOAuth2", - "options": {} - }, - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.2, - "position": [ - -2528, - 112 - ], - "id": "d57bbfef-e300-4b41-8baf-2ba63b74070a", - "name": "Fetch Full Email via API", - "credentials": { - "gmailOAuth2": { - "id": "pdqiqmxTBB5BaN3c", - "name": "Gmail account" - } - } - }, - { - "parameters": { - "jsCode": "// This WILL work if you have the right node\nvar email = $input.item.json;\nvar items = [];\n\n// Get body\nvar body = '';\nif (email && email.payload && email.payload.parts && email.payload.parts[0]) {\n var data = 
email.payload.parts[0].body.data;\n if (data) {\n data = data.replace(/-/g, '+').replace(/_/g, '/');\n body = Buffer.from(data, 'base64').toString();\n }\n}\n\n// Find job IDs \nvar matches = body.match(/\\/comm\\/jobs\\/view\\/\\d+/g) || [];\nfor (var i = 0; i < matches.length; i++) {\n var id = matches[i].match(/\\d+/)[0];\n items.push({\n json: {\n jobUrl: 'https://www.linkedin.com/jobs/view/' + id\n }\n });\n}\n\n// Always return something valid\nif (items.length === 0) {\n items.push({json: {status: 'no_urls'}});\n}\n\nreturn items;" - }, - "type": "n8n-nodes-base.code", - "typeVersion": 2, - "position": [ - -2320, - 112 - ], - "id": "fc1f457d-d720-40f1-b980-3ea4c3b27ad3", - "name": "Extract Job URLs" - }, - { - "parameters": { - "conditions": { - "options": { - "caseSensitive": true, - "leftValue": "", - "typeValidation": "strict", - "version": 1 - }, - "conditions": [ - { - "leftValue": "={{ $json.jobUrl }}", - "rightValue": "https://", - "operator": { - "type": "string", - "operation": "startsWith" - }, - "id": "7bb54c39-7877-4e50-8f82-82c37f1f293c" - } - ], - "combinator": "and" - }, - "options": {} - }, - "type": "n8n-nodes-base.if", - "typeVersion": 2.1, - "position": [ - -2128, - 112 - ], - "id": "bfa3dba1-772f-450c-8839-56bd46e5018a", - "name": "Check Extraction Status" - }, - { - "parameters": { - "url": "={{ $json.jobUrl }}", - "options": { - "response": { - "response": { - "responseFormat": "text" - } - }, - "timeout": 30000 - } - }, - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.2, - "position": [ - -1920, - 16 - ], - "id": "35f3ad63-5ce3-48e8-b1bf-ed740b4b8d86", - "name": "Scrape Job Page" - }, - { - "parameters": { - "modelId": { - "__rl": true, - "value": "gpt-5", - "mode": "list", - "cachedResultName": "GPT-5" - }, - "messages": { - "values": [ - { - "content": "You're an intelligent bot capable of extracting structured data from LinkedIn job listing pages.", - "role": "system" - }, - { - "content": "=Here's the job page HTML:\\n{{ 
$json.data }}" - }, - { - "content": "Extract the job details and return JSON with this exact structure:\\n\\n{\\n \"company_name\": \"\",\\n \"job_title\": \"\",\\n \"benefits\": \"\",\\n \"job_description\": \"\",\\n \"location\": \"\",\\n \"salary_range\": \"\"\\n}\\n\\nFor job_description: Summarize key responsibilities and requirements in 200 characters or less.\\nFor benefits: List benefits separated by commas (401K, medical, remote work, etc).\\nIf salary_range is not mentioned, use empty string.", - "role": "assistant" - } - ] - }, - "jsonOutput": true, - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.openAi", - "typeVersion": 1.8, - "position": [ - -1520, - 16 - ], - "id": "c29f238e-1b7f-4d9a-85c4-b5f2c7ba8eb2", - "name": "Extract Job Data", - "credentials": { - "openAiApi": { - "id": "vscPVbqHtDxQUTIx", - "name": "OpenAi account" - } - } - }, - { - "parameters": { - "modelId": { - "__rl": true, - "value": "chatgpt-4o-latest", - "mode": "list", - "cachedResultName": "CHATGPT-4O-LATEST" - }, - "messages": { - "values": [ - { - "content": "You're an intelligent bot rating how closely a job listing matches a candidate's profile on a scale of 0-5.\\n\\nScoring Criteria:\\n- 2 points: Skills match perfectly (systems thinking, AI/ML, product architecture) [1 point for partial match]\\n- 1 point: Right experience level (4-8 years for mid-senior roles)\\n- 1 point: Remote or hybrid work option\\n- 1 point: Job role aligns with past experience (program leadership, product, AI/ML)\\n- 1 point: Technical skills align (Python, AI/ML, systems thinking, product architecture)\\n- 1 point: Company/sector matches preferences (Ed-Tech, AI-first, learning products, startup/scale-up)\\n\\nBonus considerations:\\n- AI Product Manager or Technical PM roles: +0.5\\n- Startup/scale-up environment: +0.5\\n- Salary range โ‚น8-12 LPA or above: Mention in explanation\\n- January 2026 start date compatible: Mention in explanation\\n\\nReturn JSON with:\\n{\\n \"rating\": ,\\n 
\"explanation\": \"<2-3 sentence explanation of why this score, highlighting strengths and any concerns>\"\\n}", - "role": "system" - }, - { - "content": "=Here's the job listing details:\\n\\nJob Title: {{ $json.message.content.job_title }}\\nCompany: {{ $json.message.content.company_name }}\\nLocation: {{ $json.message.content.location }}\\nJob Description: {{ $json.message.content.job_description }}\\nBenefits: {{ $json.message.content.benefits }}\\nSalary Range: {{ $json.message.content.salary_range }}\\n\\nCandidate Profile:\\n\\\"\\\"\\\"\\nAsheesh Ranjan Srivastava - AI-Native Builder & Founder\\n\\nOVERVIEW:\\nAI-native builder with 6+ years in data analysis and program leadership. Founder of Quest & Crossfire, building AI-powered learning platforms. Currently completing MS in Computer Science (AI/ML) and OutSkill AI Engineering Bootcamp 2025.\\n\\nTARGET ROLES: AI Product Manager | Technical Product Manager (AI/ML) | AI Solutions Architect | Ed-Tech Product\\nAVAILABLE: January 2026\\nSALARY: โ‚น8-12 LPA\\nLOCATION: Remote-first or Lucknow | Willing to relocate\\n\\nCORE SKILLS:\\n- Systems Thinking & Product Architecture\\n- AI-Native Development & AI Collaboration \\n- Data Analysis & MIS Design\\n- Program Leadership (led 10-member teams)\\n- Capacity Building (trained 400+ professionals)\\n- Workflow Automation\\n\\nTECHNICAL STACK:\\n- Languages: Python\\n- AI/ML: OpenAI API, Hugging Face Transformers, GPT-3.5, BART, T5, Pegasus\\n- Frameworks: Streamlit, Gradio\\n- Automation: n8n, Workflow Design\\n- Data: Power BI, Excel, MIS Systems\\n- Tools: Git, Obsidian\\n\\nRECENT PROJECTS (2025):\\n1. Multi-Model Text Summarization (4 AI models, 83% cost reduction)\\n2. Multi-Persona Chatbot (Live: questandcrossfire.com/chatbot)\\n3. Obsidian AI Assistant (Live: questandcrossfire.com/obsidian)\\n4. 
Quest & Crossfire Platform (85% MVP, launching Q1 2026)\\n\\nEXPERIENCE HIGHLIGHTS:\\n- Quest & Crossfire: Founder (June 2025 - Present)\\n- Finish Society: M&E Executive (2021-2022) - Dashboards, MIS optimization\\n- Piramal Foundation: Program Leader (2018-2020) - Led teams, trained 400+ professionals, coordinated 5 states\\n- Kaivalya Education Foundation: Gandhi Fellow (2016-2018) - Data-driven coaching, capacity building\\n\\nKEY ACHIEVEMENTS:\\n- 6+ years data/program leadership\\n- 400+ professionals trained\\n- 10-member teams led\\n- 2 production AI apps deployed (3 weeks)\\n- Scaled programs across 5 states\\n- 50+ teachers coached\\n\\nEDUCATION:\\n- MS Computer Science (AI/ML) - Woolf University (2025-2027) - In Progress\\n- Master's Development Studies - IIT Guwahati (2014-2016)\\n- B.Sc. Industrial Chemistry - Delhi University (2010-2014)\\n\\nCERTIFICATIONS:\\n- OutSkill AI Engineering Bootcamp 2025\\n- Business Intelligence using Power BI\\n- Python for Data Science\\n- Public Policy and Social Change\\n\\nAPPROACH:\\n\\\"I'm an AI-native builder. I architect systems and partner with AI to implement them. 
This approach shipped production-ready platform in 3 weeks instead of months.\\\"\\n\\nPREFERENCES:\\n- Remote-first or hybrid culture\\n- Ed-Tech, Learning Products, or AI-first companies\\n- Startup/scale-up environment\\n- Mission: Products that help people learn, grow, navigate complexity\\n\\\"\\\"\\\"" - } - ] - }, - "jsonOutput": true, - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.openAi", - "typeVersion": 1.8, - "position": [ - -1152, - -288 - ], - "id": "8771eb3e-05c6-4b25-956a-4f7d8f98dbaf", - "name": "Rate Job Match", - "credentials": { - "openAiApi": { - "id": "vscPVbqHtDxQUTIx", - "name": "OpenAi account" - } - } - }, - { - "parameters": { - "modelId": { - "__rl": true, - "value": "chatgpt-4o-latest", - "mode": "list", - "cachedResultName": "CHATGPT-4O-LATEST" - }, - "messages": { - "values": [ - { - "content": "You're an expert at creating personalized, compelling cover letters for AI Product Manager roles.\\n\\nCreate a cover letter that:\\n1. Shows genuine interest in the specific role and company\\n2. Highlights relevant experience (don't just repeat resume)\\n3. Demonstrates understanding of the role requirements\\n4. Shows personality and authentic voice (reflective, honest, systematic)\\n5. Connects candidate's AI-native approach to company mission\\n6. Mentions Quest & Crossfire and recent AI projects as proof of skills\\n7. Keep it concise (300-400 words maximum)\\n8. 
Professional but conversational tone\\n\\nAvoid:\\n- Generic phrases like \\\"I am writing to apply\\\"\\n- Simply listing resume bullets\\n- Over-the-top enthusiasm or buzzwords\\n- Being too formal or stiff\\n\\nReturn JSON:\\n{\\n \\\"cover_letter\\\": \\\"\\\"\\n}", - "role": "system" - }, - { - "content": "=Generate a compelling cover letter for:\\n\\nJob Title: {{ $node['Extract Job Data'].json.message.content.job_title }}\\nCompany: {{ $node['Extract Job Data'].json.message.content.company_name }}\\nLocation: {{ $node['Extract Job Data'].json.message.content.location }}\\nJob Description: {{ $node['Extract Job Data'].json.message.content.job_description }}\\nBenefits: {{ $node['Extract Job Data'].json.message.content.benefits }}\\nMatch Rating: {{ $json.message.content.rating }}/5\\nMatch Explanation: {{ $json.message.content.explanation }}\\n\\nCandidate: Asheesh Ranjan Srivastava\\nKey Differentiator: AI-native builder who shipped 2 production AI apps in 3 weeks\\nCurrent: Founder at Quest & Crossfire, building AI-powered learning platform\\nAvailable: January 2026" - } - ] - }, - "jsonOutput": true, - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.openAi", - "typeVersion": 1.8, - "position": [ - -1120, - 16 - ], - "id": "1bcab9c8-768b-449b-a01f-84df7a139418", - "name": "Generate Cover Letter", - "credentials": { - "openAiApi": { - "id": "vscPVbqHtDxQUTIx", - "name": "OpenAi account" - } - } - }, - { - "parameters": { - "modelId": { - "__rl": true, - "value": "gpt-4o-mini", - "mode": "list", - "cachedResultName": "GPT-4O-MINI" - }, - "messages": { - "values": [ - { - "content": "You're creating a 45-second video script for a personalized job application video. The script should sound natural, confident, and conversational - like someone genuinely excited about the role, not reading from a script.\\n\\nStructure:\\n1. Hook (5 sec): Personal greeting + company/role mention\\n2. Match (15 sec): Why I'm perfect for THIS specific role\\n3. 
Proof (15 sec): Concrete example of relevant achievement\\n4. Call to Action (10 sec): Next steps + enthusiasm\\n\\nTone: Confident but humble, enthusiastic but professional, specific not generic\\n\\nReturn JSON:\\n{\\n \\\"video_script\\\": \\\"\\\"\\n}", - "role": "system" - }, - { - "content": "=Create video script for:\\n\\nJob: {{ $node['Extract Job Data'].json.message.content.job_title }} at {{ $node['Extract Job Data'].json.message.content.company_name }}\\nLocation: {{ $node['Extract Job Data'].json.message.content.location }}\\nKey Requirements: {{ $node['Extract Job Data'].json.message.content.job_description }}\\n\\nCandidate: Asheesh\\nKey Achievement: Built and deployed 2 AI applications in 3 weeks\\nUnique Angle: AI-native builder who partners with AI to ship fast\\nPersonality: Reflective, systematic, honest" - } - ] - }, - "jsonOutput": true, - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.openAi", - "typeVersion": 1.8, - "position": [ - -1088, - 480 - ], - "id": "e15dc853-6fe6-4f77-a622-ccfc44ea6b53", - "name": "Create Video Script", - "credentials": { - "openAiApi": { - "id": "vscPVbqHtDxQUTIx", - "name": "OpenAi account" - } - } - }, - { - "parameters": { - "conditions": { - "options": { - "caseSensitive": true, - "leftValue": "", - "typeValidation": "strict" - }, - "conditions": [ - { - "leftValue": "={{ $node['Rate Job Match'].json.message.content.rating }}", - "rightValue": 4, - "operator": { - "type": "number", - "operation": "gte" - } - } - ], - "combinator": "and" - }, - "options": {} - }, - "type": "n8n-nodes-base.if", - "typeVersion": 2.1, - "position": [ - -384, - -352 - ], - "id": "55ecf7c5-ec88-4c71-bf1f-3276b3eb3e5c", - "name": "IF Rating >= 4" - }, - { - "parameters": { - "url": "https://api.heygen.com/v2/video/generate", - "authentication": "genericCredentialType", - "genericAuthType": "httpHeaderAuth", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "X-Api-Key", - "value": "={{ 
$credentials.heygenApiKey }}" - } - ] - }, - "sendBody": true, - "specifyBody": "json", - "jsonBody": "={\\n \\\"video_inputs\\\": [\\n {\\n \\\"character\\\": {\\n \\\"type\\\": \\\"avatar\\\",\\n \\\"avatar_id\\\": \\\"Daisy-inskirt-20220818\\\",\\n \\\"avatar_style\\\": \\\"normal\\\"\\n },\\n \\\"voice\\\": {\\n \\\"type\\\": \\\"text\\\",\\n \\\"input_text\\\": \\\"{{ $json.message.content.video_script }}\\\",\\n \\\"voice_id\\\": \\\"1bd001e7e50f421d891986aad5158bc8\\\",\\n \\\"speed\\\": 1.1\\n },\\n \\\"background\\\": {\\n \\\"type\\\": \\\"color\\\",\\n \\\"value\\\": \\\"#f5f5f5\\\"\\n }\\n }\\n ],\\n \\\"dimension\\\": {\\n \\\"width\\\": 1280,\\n \\\"height\\\": 720\\n },\\n \\\"aspect_ratio\\\": \\\"16:9\\\",\\n \\\"test\\\": false\\n}", - "options": {} - }, - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.2, - "position": [ - -512, - -96 - ], - "id": "9450793a-b1a4-4761-aff5-f83ff13fa0ed", - "name": "Generate Video (HeyGen)" - }, - { - "parameters": { - "amount": 30 - }, - "type": "n8n-nodes-base.wait", - "typeVersion": 1.1, - "position": [ - -320, - -96 - ], - "id": "13047e17-ea91-4283-996d-2579cf332aba", - "name": "Wait 30 Seconds", - "webhookId": "31cb717b-481a-497c-bc3b-e2aac99785d5" - }, - { - "parameters": { - "url": "=https://api.heygen.com/v1/video_status.get?video_id={{ $node['Generate Video (HeyGen)'].json.data.video_id }}", - "authentication": "genericCredentialType", - "genericAuthType": "httpHeaderAuth", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "X-Api-Key", - "value": "={{ $credentials.heygenApiKey }}" - } - ] - }, - "options": {} - }, - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.2, - "position": [ - -112, - -96 - ], - "id": "974e6b44-8504-49d3-9254-def9fbd6ffd0", - "name": "Check Video Status" - }, - { - "parameters": { - "conditions": { - "options": { - "caseSensitive": true, - "leftValue": "", - "typeValidation": "strict" - }, - "conditions": [ - { - "leftValue": "={{ 
$json.data.status }}", - "rightValue": "completed", - "operator": { - "type": "string", - "operation": "equals" - } - } - ], - "combinator": "and" - }, - "options": {} - }, - "type": "n8n-nodes-base.if", - "typeVersion": 2.1, - "position": [ - 80, - -96 - ], - "id": "1cc4cccf-8f4a-48c8-a9d0-1d99903f914e", - "name": "IF Video Complete" - }, - { - "parameters": { - "documentId": { - "__rl": true, - "value": "YOUR_GOOGLE_SHEET_ID_HERE", - "mode": "id" - }, - "sheetName": { - "__rl": true, - "value": "Job Applications", - "mode": "name" - }, - "options": {} - }, - "type": "n8n-nodes-base.googleSheets", - "typeVersion": 4.5, - "position": [ - 288, - -192 - ], - "id": "878c8087-c0a2-46e0-b26d-1600c0a07ad0", - "name": "Save to Google Sheet", - "credentials": { - "googleSheetsOAuth2Api": { - "id": "JlsdufNZLZl983CS", - "name": "Google Sheets account" - } - } - }, - { - "parameters": { - "operation": "addLabel" - }, - "type": "n8n-nodes-base.gmail", - "typeVersion": 2.1, - "position": [ - 912, - 400 - ], - "id": "f192e6c3-0c67-4808-9535-4bba936c4144", - "name": "Mark Email as Read", - "webhookId": "791e159a-0dfd-44c4-8b83-44fb4a65907d", - "credentials": { - "gmailOAuth2": { - "id": "pdqiqmxTBB5BaN3c", - "name": "Gmail account" - } - } - }, - { - "parameters": { - "options": {} - }, - "type": "n8n-nodes-base.set", - "typeVersion": 3.4, - "position": [ - -1920, - 208 - ], - "id": "635f7929-24e0-49ed-9436-2f3af59f5485", - "name": "Handle No URLs" - }, - { - "parameters": { - "operation": "appendOrUpdate", - "documentId": { - "__rl": true, - "value": "1xMELeUYUcGrqctnCQSWOuB586k1NHth0mgLhUVRu6EI", - "mode": "list", - "cachedResultName": "Job_Tracking_Sheet_Template", - "cachedResultUrl": "https://docs.google.com/spreadsheets/d/1xMELeUYUcGrqctnCQSWOuB586k1NHth0mgLhUVRu6EI/edit?usp=drivesdk" - }, - "sheetName": { - "__rl": true, - "value": 2083683147, - "mode": "list", - "cachedResultName": "Job_Tracking_Sheet_Template", - "cachedResultUrl": 
"https://docs.google.com/spreadsheets/d/1xMELeUYUcGrqctnCQSWOuB586k1NHth0mgLhUVRu6EI/edit#gid=2083683147" - }, - "columns": { - "mappingMode": "defineBelow", - "value": { - "Title": "={{ $('Extract Job Data').item.json.message.content.job_title }}", - "Job Description": "={{ $('Extract Job Data').item.json.message.content.job_description }}", - "Link": "={{ $('Extract Job URLs').item.json.jobUrl }}", - "Rating": "={{ $('Rate Job Match').item.json.message.content.rating }}", - "Benefits": "={{ $('Extract Job Data').item.json.message.content.benefits }}", - "Company Name": "={{ $('Extract Job Data').item.json.message.content.company_name }}", - "Location": "={{ $('Extract Job Data').item.json.message.content.location }}", - "Match Explanation": "={{ $('Rate Job Match').item.json.message.content.explanation }}", - "Cover Letter": "={{ $('Generate Cover Letter').item.json.message.content.cover_letter }}", - "Video Script": "={{ $json.message.content.video_script }}" - }, - "matchingColumns": [ - "Title" - ], - "schema": [ - { - "id": "Title", - "displayName": "Title", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "Job Description", - "displayName": "Job Description", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Link", - "displayName": "Link", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Date", - "displayName": "Date", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "Rating", - "displayName": "Rating", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Company Name", - "displayName": "Company Name", - "required": false, - 
"defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Benefits", - "displayName": "Benefits", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Location", - "displayName": "Location", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Match Explanation", - "displayName": "Match Explanation", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Cover Letter", - "displayName": "Cover Letter", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Video Script", - "displayName": "Video Script", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - } - ], - "attemptToConvertTypes": false, - "convertFieldsToString": false - }, - "options": {} - }, - "type": "n8n-nodes-base.googleSheets", - "typeVersion": 4.5, - "position": [ - -512, - 112 - ], - "id": "d4d0a03b-3e4b-47be-a0de-4498ef7b18c8", - "name": "Save to Sheet (No Video)", - "credentials": { - "googleSheetsOAuth2Api": { - "id": "JlsdufNZLZl983CS", - "name": "Google Sheets account" - } - } - } - ], - "pinData": {}, - "connections": { - "Schedule (Daily 3 AM)": { - "main": [ - [ - { - "node": "Get LinkedIn Email IDs", - "type": "main", - "index": 0 - } - ] - ] - }, - "Get LinkedIn Email IDs": { - "main": [ - [ - { - "node": "Fetch Full Email via API", - "type": "main", - "index": 0 - } - ] - ] - }, - "Fetch Full Email via API": { - "main": [ - [ - { - "node": "Extract Job URLs", - "type": "main", - "index": 0 - } - ] - ] - }, - "Extract Job URLs": { - "main": [ - [ - { - "node": "Check Extraction Status", - "type": "main", - "index": 0 - } - ] - ] - }, - "Check Extraction Status": { - "main": [ - [ - 
{ - "node": "Scrape Job Page", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Handle No URLs", - "type": "main", - "index": 0 - } - ] - ] - }, - "Scrape Job Page": { - "main": [ - [ - { - "node": "Extract Job Data", - "type": "main", - "index": 0 - } - ] - ] - }, - "Extract Job Data": { - "main": [ - [ - { - "node": "Rate Job Match", - "type": "main", - "index": 0 - } - ] - ] - }, - "Rate Job Match": { - "main": [ - [ - { - "node": "Generate Cover Letter", - "type": "main", - "index": 0 - } - ] - ] - }, - "Generate Cover Letter": { - "main": [ - [ - { - "node": "Create Video Script", - "type": "main", - "index": 0 - } - ] - ] - }, - "Create Video Script": { - "main": [ - [ - { - "node": "Save to Sheet (No Video)", - "type": "main", - "index": 0 - } - ] - ] - }, - "IF Rating >= 4": { - "main": [ - [ - { - "node": "Generate Video (HeyGen)", - "type": "main", - "index": 0 - } - ], - [] - ] - }, - "Generate Video (HeyGen)": { - "main": [ - [ - { - "node": "Wait 30 Seconds", - "type": "main", - "index": 0 - } - ] - ] - }, - "Wait 30 Seconds": { - "main": [ - [ - { - "node": "Check Video Status", - "type": "main", - "index": 0 - } - ] - ] - }, - "Check Video Status": { - "main": [ - [ - { - "node": "IF Video Complete", - "type": "main", - "index": 0 - } - ] - ] - }, - "IF Video Complete": { - "main": [ - [ - { - "node": "Save to Google Sheet", - "type": "main", - "index": 0 - } - ], - [ - { - "node": "Wait 30 Seconds", - "type": "main", - "index": 0 - } - ] - ] - }, - "Save to Google Sheet": { - "main": [ - [] - ] - }, - "Handle No URLs": { - "main": [ - [ - { - "node": "Mark Email as Read", - "type": "main", - "index": 0 - } - ] - ] - }, - "Save to Sheet (No Video)": { - "main": [ - [ - { - "node": "Mark Email as Read", - "type": "main", - "index": 0 - } - ] - ] - } - }, - "active": false, - "settings": { - "executionOrder": "v1" - }, - "versionId": "d66e6b25-8c61-44bb-9c24-c6a31c092eed", - "meta": { - "templateCredsSetupCompleted": true, - "instanceId": 
"b6eba3b68dafdb2b2f3d7bb56cb7beb02af714cb08ffcd6ccca73705d7806808" - }, - "id": "IUwEE2slj9P3mnt3", - "tags": [] -} diff --git a/Asheesh_Ranjan_Srivastava/Day-5/LICENSE b/Asheesh_Ranjan_Srivastava/Day-5/LICENSE deleted file mode 100644 index d52198d..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-5/LICENSE +++ /dev/null @@ -1,135 +0,0 @@ -GNU AFFERO GENERAL PUBLIC LICENSE -Version 3, 19 November 2007 - -Copyright (C) 2025 Asheesh Ranjan Srivastava - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . - -============================================================================== - -Quest And Crossfire LinkedIn AI - Serverless Application -AI-powered LinkedIn post generator with OAuth 2.0 authentication - -Built for OutSkill AI Engineering Bootcamp 2025 - Day 5 -Quest And Crossfireโ„ข ยฉ 2025 Asheesh Ranjan Srivastava - -============================================================================== - -COPYRIGHT HOLDER RIGHTS: - -As the copyright holder, Asheesh Ranjan Srivastava retains ALL RIGHTS -to use this code in any manner, including: -- Closed-source applications -- Commercial products -- Proprietary derivatives -- Alternative licensing arrangements - -AGPL-3.0 restrictions apply ONLY to derivative works created by others. 
- -For commercial licensing inquiries or alternative licensing arrangements: -Contact: asheesh.srivastava@questandcrossfire.com - -============================================================================== - -ADDITIONAL NOTICES: - -1. TRADEMARKS - - "Aethelgard Academy" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - "Quest And Crossfire" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - Use of these trademarks requires explicit permission - -2. AI ATTRIBUTION - This software was developed with assistance from: - - Claude Code (Anthropic) for technical implementation and debugging - - Human strategic decisions and quality control by Asheesh Ranjan Srivastava - - This represents human-AI collaboration in modern software development - -3. THIRD-PARTY SERVICES AND DEPENDENCIES - This application uses the following services: - - Vercel (serverless hosting platform) - - LinkedIn API (OAuth 2.0 + Share on LinkedIn) - - n8n (AI workflow automation) - - OpenAI API (GPT-4o-mini) - - Supabase PostgreSQL (database) - - Node.js libraries (jsonwebtoken, etc.) - - Each service is subject to its respective terms of service and pricing. - -4. BOOTCAMP ATTRIBUTION - This project was created as part of the OutSkill AI Engineering Bootcamp 2025. - Architecture, OAuth 2.0 implementation, 3-layer security design, and - Quest And Crossfire brand integration by Asheesh Ranjan Srivastava. - -5. SECURITY IMPLEMENTATION - This application implements: - - LinkedIn OAuth 2.0 authentication - - JWT session management (7-day expiration) - - 3-layer security architecture (frontend gate + backend JWT + email whitelist) - - Email whitelist protection (private application) - - HTTP-only secure cookies - - Environment variable management - -6. 
DATA AND PRIVACY - - This is a private application with email whitelist - - User data is stored in Supabase with RLS policies - - API keys and credentials required (not included) - - No warranty for production use or data privacy - -7. COPYLEFT NOTICE (AGPL-3.0) - Under AGPL-3.0, any derivative works or modifications must also be - released under AGPL-3.0 or compatible license. - - If you use this code in your project, you must: - - Make your source code available - - License your project under AGPL-3.0 - - Attribute the original work - - State your changes - - If used in a web service, provide source code to users - - AGPL-3.0 NETWORK USE CLAUSE: - If you run a modified version of this software as a web service - (e.g., Vercel deployment, cloud hosting), you MUST make the - complete source code available to users of that service. - -============================================================================== - -FULL SOURCE CODE AVAILABILITY: - -The complete source code for this project is publicly available at: -https://github.com/AsheeshSrivastava/quest-crossfire-linkedin-app - -This includes: -- All serverless functions (OAuth, generation, publishing) -- Frontend HTML/CSS/JavaScript -- Security implementation -- Comprehensive documentation (7 files, 6,148 lines) -- Setup guides and learning journey - -============================================================================== - -For the complete GNU Affero General Public License v3.0 text, see: -https://www.gnu.org/licenses/agpl-3.0.txt - -============================================================================== - -CONTACT: -Author: Asheesh Ranjan Srivastava -Email: asheesh.srivastava@questandcrossfire.com -Brand: Quest And Crossfire -Project: Aethelgard Academy -Live App: https://quest-crossfire-linkedin-app.vercel.app - -โ—‡ Where chaos becomes clarity. Small fixes, big clarity. 
diff --git a/Asheesh_Ranjan_Srivastava/Day-5/README.md b/Asheesh_Ranjan_Srivastava/Day-5/README.md deleted file mode 100644 index 97cacf9..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-5/README.md +++ /dev/null @@ -1,309 +0,0 @@ -# ๐ŸŽฏ Day 5: Quest And Crossfire LinkedIn AI - Serverless Application - -**Project:** AI-Powered LinkedIn Post Generator with OAuth 2.0 Authentication -**Type:** Production-ready serverless web application -**Deployment:** โœ… Live on Vercel -**Status:** Complete with 3-layer security implementation - ---- - -## ๐Ÿš€ Live Application - -**Production URL:** https://quest-crossfire-linkedin-app.vercel.app - -**Demo Credentials:** Private app (email whitelist) - ---- - -## ๐Ÿ“ฆ Project Repository - -**โญ Full project code and documentation available in separate GitHub repository:** - -๐Ÿ”— **GitHub:** https://github.com/AsheeshSrivastava/quest-crossfire-linkedin-app - -The codebase includes: -- Complete serverless backend (Vercel functions) -- Frontend with Quest And Crossfire branding -- OAuth 2.0 + JWT authentication -- 3-layer security implementation -- Comprehensive documentation (7 files, 6,148 lines) - ---- - -## ๐ŸŽฏ What It Does - -An AI-powered LinkedIn post generator that embodies the **Quest And Crossfireโ„ข** brand philosophy: - -- **Generates** professional LinkedIn posts using n8n + OpenAI GPT-4o-mini -- **Publishes** directly to LinkedIn via OAuth 2.0 integration -- **Secured** with 3-layer defense-in-depth architecture -- **Branded** with complete Quest And Crossfire visual identity - -**Key Philosophy:** *"Where chaos becomes clarity. 
Small fixes, big clarity."* - ---- - -## ๐Ÿ› ๏ธ Tech Stack - -### **Frontend** -- Vanilla JavaScript -- HTML/CSS with Quest And Crossfire branding -- Responsive UI with diamond (โ—‡) logo - -### **Backend (Serverless)** -- **Platform:** Vercel serverless functions -- **APIs:** 5 API endpoints - - `/api/auth/linkedin` - OAuth initiation - - `/api/auth/linkedin/callback` - OAuth callback with email whitelist - - `/api/auth/check` - Authentication verification - - `/api/generate` - AI post generation (secured) - - `/api/publish` - LinkedIn publishing (secured) - -### **AI & Automation** -- **n8n workflows:** AI agent with 200+ line brand-aware system prompt -- **OpenAI:** GPT-4o-mini for content generation -- **LinkedIn API:** OAuth 2.0 + Share API - -### **Security** -- **JWT sessions:** 7-day expiration, HTTP-only cookies -- **Email whitelist:** Restricted to `asheeshsrivastava9@gmail.com` -- **Environment variables:** 10 secrets secured in Vercel dashboard - -### **Database** -- **Supabase PostgreSQL:** Configured for future features (post history, analytics) - ---- - -## ๐Ÿ” Security Architecture (3 Layers) - -### **Layer 1: Frontend Authentication Gate** -- Checks authentication on page load -- Redirects to `/login.html` if not authenticated -- All API calls include credentials - -### **Layer 2: Backend JWT Verification** -- All secured endpoints verify JWT from session cookie -- Returns 401 Unauthorized if invalid -- Uses `getUserFromRequest()` helper - -### **Layer 3: Email Whitelist** -- OAuth callback only allows whitelisted email -- Prevents unauthorized LinkedIn account linking -- "Access Denied" message for non-whitelisted users - ---- - -## ๐ŸŽ“ What I Learned - -### **Technical Skills** -1. **OAuth 2.0 Flow Implementation** - - Authorization endpoint configuration - - Redirect URI management - - Token exchange - - Profile retrieval - -2. 
**JWT Session Management** - - Token creation and verification - - Secure cookie configuration (HttpOnly, Secure, SameSite) - - Session expiration handling - -3. **Serverless Architecture** - - Vercel function configuration - - Environment variable management - - CORS handling - - Stateless function design - -4. **LinkedIn API Integration** - - OAuth products enablement - - Share on LinkedIn endpoint - - Profile data retrieval - -### **Problem-Solving Journey** - -**4 Root Causes Fixed:** -1. **OAuth Callback 404:** Redirecting to non-existent `/dashboard.html` -2. **Redirect URI Mismatch:** Preview vs production URL confusion -3. **Missing Environment Variables:** Only configured locally, not in Vercel -4. **LinkedIn Products Not Enabled:** Required OAuth products not activated - -**Solution:** Systematic debugging + defense-in-depth security - -**Time Investment:** 6 hours (4.5 debugging + 1.5 documentation) - ---- - -## ๐Ÿ“Š Key Metrics - -**Development:** -- **Duration:** 2 sessions (~8 hours total) -- **Git Commits:** 7 major commits -- **Documentation:** 7 files, 6,148 lines -- **Code:** ~800 lines (frontend + backend) - -**Security:** -- **Layers:** 3 (frontend + backend + whitelist) -- **Session:** 7-day JWT expiration -- **Secrets:** 10 environment variables - -**Deployment:** -- **Platform:** Vercel (serverless) -- **Build Time:** ~2 minutes -- **Uptime:** 100% since Nov 2, 2025 - ---- - -## ๐ŸŽฏ Features - -### **Current Features (v1.0)** -- โœ… LinkedIn OAuth 2.0 login -- โœ… JWT session management -- โœ… AI post generation (n8n + GPT-4o-mini) -- โœ… Post preview and editing -- โœ… Character counter (3000 limit) -- โœ… LinkedIn publishing -- โœ… 3-layer security -- โœ… Email whitelist -- โœ… Quest And Crossfire branding - -### **Planned Features (Future)** -- โณ Logout button -- โณ Post history dashboard -- โณ Scheduling system -- โณ Analytics dashboard -- โณ Custom domain - ---- - -## ๐Ÿ“š Documentation - -**Complete documentation available in 
GitHub repository:** - -1. **FINAL-CHECKPOINT-2025-11-01.md** (590 lines) - - Complete session summary - - Security verification - - Production status - -2. **SESSION-LOG-2025-11-01-OAUTH-FIX.md** (1,424 lines) - - Complete 4.5-hour timeline - - All debugging steps - - Problems and solutions - -3. **LEARNING-BLOG.md** (1,567 lines) - - Technical deep-dive - - OAuth 2.0 concepts - - JWT implementation - - Security patterns - -4. **ACTION-PLAN.md** (775 lines) - - Future roadmap - - Feature prioritization - - Decision points - -5. **LINKEDIN-ARTICLE.md** (1,200 lines) - - Full article for publication - - Technical storytelling - -6. **LINKEDIN-ARTICLE-SHORT.md** (374 lines) - - 7-minute read version - - Key highlights - ---- - -## ๐Ÿ† Project Highlights - -### **What Makes This Special** - -**1. Production-Ready Security** -- Not just "works" - actually secure -- Defense-in-depth architecture -- Email whitelist for access control - -**2. Comprehensive Documentation** -- 6,148 lines of documentation -- Complete learning journey -- Transparent problem-solving - -**3. Brand Integration** -- Quest And Crossfire philosophy embedded -- Reflective, systematic tone in AI prompts -- Visual identity throughout - -**4. 
Real-World Application** -- Actually deployed and working -- Solving real content creation needs -- Used for generating LinkedIn posts - ---- - -## ๐Ÿ”— Links - -**Live Application:** https://quest-crossfire-linkedin-app.vercel.app -**GitHub Repository:** https://github.com/AsheeshSrivastava/quest-crossfire-linkedin-app -**Vercel Dashboard:** https://vercel.com/dashboard -**LinkedIn Developer:** https://www.linkedin.com/developers/apps - ---- - -## โš–๏ธ License & Attribution - -**License:** GNU Affero General Public License v3.0 (AGPL-3.0) - see [LICENSE](LICENSE) - -**Trademarks:** -- "Quest And Crossfire" is a trademark of Asheesh Ranjan Srivastava (Trademark Filed - awaiting certification) -- "Aethelgard Academy" is a trademark of Asheesh Ranjan Srivastava (Trademark Filed - awaiting certification) - -**Key Points:** -- Open source under AGPL-3.0 -- Network use clause: Must provide source code if deployed as web service -- You cannot use Quest And Crossfireโ„ข branding without permission -- Private instance currently restricted to authorized user only (email whitelist) - -**AI Collaboration:** -- Technical implementation: Claude Code (Anthropic) -- Strategic decisions: Human (Asheesh) -- Documentation: Human-AI collaboration - ---- - -## ๐Ÿ’ก Key Takeaways - -**For Portfolio:** -- Demonstrates OAuth 2.0 implementation skills -- Shows security-first mindset -- Proves systematic debugging abilities -- Exhibits comprehensive documentation practices - -**For Learning:** -- OAuth 2.0 flow mastery -- JWT session management -- Serverless architecture -- Defense-in-depth security - -**For Professional Growth:** -- Transparency in AI collaboration -- Thorough problem documentation -- Production-ready mindset -- Brand consistency - ---- - -## ๐Ÿš€ Access Instructions - -**For Reviewers:** - -This is a **private application** with email whitelist security. To review: - -1. **View GitHub Repository:** https://github.com/AsheeshSrivastava/quest-crossfire-linkedin-app -2. 
**Read Documentation:** Complete session logs and learning blog -3. **See Live App:** https://quest-crossfire-linkedin-app.vercel.app (login restricted) -4. **Watch Demo:** (Video to be created if needed) - -**Note:** The app is intentionally restricted to prevent unauthorized LinkedIn posting. - ---- - -**โ—‡ Where chaos becomes clarity. Small fixes, big clarity.** - -**Built with:** Claude Code (https://claude.com/claude-code) -**Deployed:** November 2, 2025 -**Status:** โœ… Production-ready, Secured, Documented diff --git a/Asheesh_Ranjan_Srivastava/Day-6/LICENSE b/Asheesh_Ranjan_Srivastava/Day-6/LICENSE deleted file mode 100644 index 9cef5de..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-6/LICENSE +++ /dev/null @@ -1,131 +0,0 @@ -GNU AFFERO GENERAL PUBLIC LICENSE -Version 3, 19 November 2007 - -Copyright (C) 2025 Asheesh Ranjan Srivastava - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
- -============================================================================== - -Advanced RAG Application Development -Complete RAG system with vector databases and production deployment - -Built for OutSkill AI Engineering Bootcamp 2025 - Day 6 -Quest And Crossfireโ„ข ยฉ 2025 Asheesh Ranjan Srivastava - -============================================================================== - -COPYRIGHT HOLDER RIGHTS: - -As the copyright holder, Asheesh Ranjan Srivastava retains ALL RIGHTS -to use this code in any manner, including: -- Closed-source applications -- Commercial products -- Proprietary derivatives -- Alternative licensing arrangements - -AGPL-3.0 restrictions apply ONLY to derivative works created by others. - -For commercial licensing inquiries or alternative licensing arrangements: -Contact: asheesh.srivastava@questandcrossfire.com - -============================================================================== - -ADDITIONAL NOTICES: - -1. TRADEMARKS - - "Aethelgard Academy" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - "Quest And Crossfire" is a trademark of Asheesh Ranjan Srivastava - (Trademark Filed - awaiting certification) - - Use of these trademarks requires explicit permission - -2. AI ATTRIBUTION - This software was developed with assistance from: - - Claude Code (Anthropic) for technical implementation - - Human strategic decisions and quality control by Asheesh Ranjan Srivastava - -3. THIRD-PARTY DEPENDENCIES - This software uses the following open-source libraries: - - LlamaIndex (MIT License) - - LanceDB (Apache 2.0 License) - - HuggingFace Transformers (Apache 2.0 License) - - HuggingFace Sentence Transformers (Apache 2.0 License) - - Gradio (Apache 2.0 License) - - OpenAI API (proprietary) - - Pydantic (MIT License) - -4. 
BOOTCAMP ATTRIBUTION - This project includes 4 complete assignments: - - Assignment 1: Vector Database Basics - - Assignment 2: Advanced RAG Techniques - - Assignment 3a: Basic Gradio RAG - - Assignment 3b: Advanced Gradio RAG - - Created as part of the OutSkill AI Engineering Bootcamp 2025. - Implementation and learning journey by Asheesh Ranjan Srivastava. - -5. BONUS: ENHANCED PRODUCTION APPLICATION - This submission includes documentation for the Aethelgard Concept Generator, - a production-ready RAG application deployed to HuggingFace Spaces. - - Live Demo: https://huggingface.co/spaces/asheeshsrivastava9/QnC - GitHub: https://github.com/AsheeshSrivastava/aethelgard-concept-generator - - The production application includes: - - Login system with authentication - - Frontend API key management - - File upload functionality (multiple PDFs) - - Advanced RAG techniques (SimilarityPostprocessor, TreeSummarize, Pydantic) - - Export to markdown - - Quest And Crossfire branding - - Comprehensive security (rate limiting, API key validation) - -6. DATA REQUIREMENTS - These notebooks require: - - Python reference materials (39 documents used in assignments) - - OpenAI API key for LLM functionality - - Course materials are NOT included due to copyright restrictions - - Users must provide their own learning materials or use public domain content - -7. COPYLEFT NOTICE (AGPL-3.0) - Under AGPL-3.0, any derivative works or modifications must also be - released under AGPL-3.0 or compatible license. - - If you use this code in your project, you must: - - Make your source code available - - License your project under AGPL-3.0 - - Attribute the original work - - State your changes - - If used in a web service, provide source code to users - - AGPL-3.0 NETWORK USE CLAUSE: - If you run a modified version of this software as a web service - (e.g., Hugging Face Space, Gradio deployment, cloud deployment), - you MUST make the complete source code available to users of that service. 
- -============================================================================== - -For the complete GNU Affero General Public License v3.0 text, see: -https://www.gnu.org/licenses/agpl-3.0.txt - -============================================================================== - -CONTACT: -Author: Asheesh Ranjan Srivastava -Email: asheesh.srivastava@questandcrossfire.com -Brand: Quest And Crossfire -Project: Aethelgard Academy - -โ—‡ Where chaos becomes clarity. Small fixes, big clarity. diff --git a/Asheesh_Ranjan_Srivastava/Day-6/README.md b/Asheesh_Ranjan_Srivastava/Day-6/README.md deleted file mode 100644 index 928c150..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-6/README.md +++ /dev/null @@ -1,369 +0,0 @@ -# ๐Ÿ“š Day 6: Advanced RAG Application Development - -**OutSkill AI Engineering Bootcamp 2025** -**Student:** Asheesh Ranjan Srivastava -**Date:** November 3, 2025 -**Status:** โœ… Complete (4/4 Assignments) - ---- - -## ๐ŸŽฏ Overview - -Day 6 focused on **Retrieval-Augmented Generation (RAG)** - the architecture behind ChatGPT Memory, Claude Projects, and Gemini Gems. Completed all 4 assignments covering vector databases, advanced RAG techniques, and Gradio application development. 
- ---- - -## ๐Ÿ“ Submission Contents - -### **Assignment 1: Vector Database Basics** โœ… -**File:** `assignment_1_vector_db_basics_SUBMISSION.ipynb` -**Size:** 27.9 KB - -**What I Built:** -- Complete RAG system with LlamaIndex + LanceDB -- Loaded and processed 39 Python reference documents -- Created 14,976 embeddings (384 dimensions) -- Implemented semantic search functionality - -**Key Technologies:** -- LlamaIndex (RAG framework) -- LanceDB (vector database) -- HuggingFace embeddings (BAAI/bge-small-en-v1.5) -- OpenAI GPT-4o-mini (LLM) - -**Learning Breakthrough:** -Realized RAG is the fundamental architecture used in: -- ChatGPT Memory & Projects -- Claude Projects (conversation persistence) -- Gemini Gems (custom AI assistants) - ---- - -### **Assignment 2: Advanced RAG Techniques** โœ… -**File:** `assignment_2_advanced_rag_SUBMISSION.ipynb` -**Size:** 39.7 KB - -**What I Built:** -Advanced RAG system implementing 4 production-ready techniques: - -**1. SimilarityPostprocessor** (Relevance Filtering) -```python -SimilarityPostprocessor(similarity_cutoff=0.3) -``` -- Filters low-relevance chunks -- 50-60% cost savings -- Improved response quality - -**2. TreeSummarize** (Response Synthesis) -```python -response_synthesizer=get_response_synthesizer( - response_mode=ResponseMode.TREE_SUMMARIZE -) -``` -- Hierarchical response generation -- Better multi-source synthesis -- Comprehensive analytical queries - -**3. Pydantic Structured Outputs** -```python -class ConceptCard(BaseModel): - title: str - problem: str - system: str - # ... 7 more fields -``` -- Type-safe JSON responses -- Guaranteed completeness -- Production reliability - -**4. 
Advanced Pipeline Architecture** -- Combined filtering + synthesis -- Query routing for different content types -- Error handling and validation - -**Cost Optimization:** 50-60% token savings through postprocessing - ---- - -### **Assignment 3a: Basic Gradio RAG** โœ… -**File:** `assignment_3a_basic_gradio_rag_SUBMISSION.ipynb` -**Size:** 22.7 KB - -**What I Built:** -- Simple Gradio UI integrated with RAG system -- User query input โ†’ RAG retrieval โ†’ Response display -- Basic interface for RAG interaction -- Foundation for advanced UI development - -**Technologies:** -- Gradio (UI framework) -- LlamaIndex (RAG backend) -- Simple interface pattern - ---- - -### **Assignment 3b: Advanced Gradio RAG** โœ… -**File:** `assignment_3b_advanced_gradio_rag_SUBMISSION.ipynb` -**Size:** 40.6 KB - -**What I Built:** -Advanced Gradio interface with full parameter configuration: -- Adjustable similarity threshold slider -- Top-k retrieval parameter control -- Response mode selection (TreeSummarize, Refine, Compact) -- Real-time parameter tuning -- Advanced UI components - -**Learning:** -- Interactive parameter exploration -- Trade-offs between different configurations -- Performance vs quality optimization - ---- - -## ๐Ÿš€ Bonus: Enhanced Production Application - -In addition to the 4 assignments, I built a **production-ready RAG application** deployed to HuggingFace Spaces. 
- -### **Aethelgard Concept Generator** ๐Ÿฐ -**Live Demo:** https://huggingface.co/spaces/asheeshsrivastava9/QnC -**GitHub:** https://github.com/AsheeshSrivastava/aethelgard-concept-generator - -**Features:** -- โœ… Login system (authentication) -- โœ… Frontend API key management (secure configuration) -- โœ… File upload functionality (multiple PDFs) -- โœ… Advanced RAG with all Assignment 2 techniques -- โœ… Export to markdown -- โœ… Quest And Crossfire branding -- โœ… Multi-tab interface (Settings, Setup, Generate) -- โœ… **Comprehensive security** (rate limiting, API key validation, clear key button) - -**Deployment Status:** -- โœ… Live on HuggingFace Spaces -- โœ… 6 dependency fixes applied -- โœ… Production-ready with security features -- โœ… Complete documentation - -**Tech Stack:** -- Gradio (UI) -- LlamaIndex + LanceDB (RAG backend) -- OpenAI GPT-4o-mini (LLM) -- HuggingFace embeddings (free) -- Python app.py (FastAPI-style architecture) - -**Why This Matters:** -- Demonstrates production deployment skills -- Shows security implementation (rate limiting, validation) -- Production debugging (6 dependency fixes) -- Portfolio-ready application - ---- - -## ๐Ÿ“Š Key Metrics - -### **Documents Processed:** -- **Assignment 1:** 39 Python reference PDFs -- **Assignment 2:** Same corpus, advanced techniques -- **Assignments 3a/3b:** Interactive RAG applications - -### **Embeddings Created:** -- **Total:** 14,976 vectors -- **Dimensions:** 384D -- **Model:** BAAI/bge-small-en-v1.5 (HuggingFace) -- **Cost:** $0 (free embeddings) - -### **Vector Database:** -- **Technology:** LanceDB -- **Size:** ~6MB index -- **Search Speed:** Sub-second similarity queries - -### **LLM Usage:** -- **Model:** OpenAI GPT-4o-mini -- **Cost:** ~$0.01-0.02 per query -- **Optimization:** 50-60% token savings with postprocessors - ---- - -## ๐ŸŽ“ What I Learned - -### **Technical Mastery:** -1. 
**Vector Database Fundamentals** - - Embedding creation and storage - - Similarity search algorithms - - Index management - - Disk-based persistence - -2. **Advanced RAG Architecture** - - Postprocessor pipelines - - Response synthesis strategies - - Query routing - - Cost optimization - -3. **Production Deployment** - - Dependency management (llama-index ecosystem) - - Environment configuration - - Security best practices - - HuggingFace Spaces deployment - -### **Strategic Insights:** -- **RAG is everywhere:** ChatGPT, Claude, Gemini all use this pattern -- **Checkpoints = RAG:** Session logs use same retrieval architecture -- **Cost matters:** Postprocessing can save 50-60% of LLM costs -- **Security first:** Rate limiting and validation in production - -### **Problem-Solving:** -- LanceDB embedding dimension mismatches -- OpenAI API key configuration (Windows env vars) -- Pydantic validation errors (prompt engineering) -- HuggingFace Spaces deployment (6 dependency fixes) -- OpenAI client compatibility (proxies parameter) - ---- - -## ๐Ÿ› ๏ธ How to Run Submissions - -### **Prerequisites:** -```bash -pip install llama-index llama-index-vector-stores-lancedb \ -llama-index-embeddings-huggingface llama-index-llms-openai \ -lancedb sentence-transformers openai gradio -``` - -### **Assignment 1-2:** -1. Open notebook in Jupyter -2. Set OpenAI API key: `os.environ["OPENAI_API_KEY"] = "sk-..."` -3. Run cells sequentially -4. Wait for embeddings creation (2-3 minutes) - -### **Assignment 3a-3b:** -1. Same setup as 1-2 -2. Run until Gradio interface launches -3. Access at http://127.0.0.1:7860 -4. Interact with RAG system via UI - -### **Bonus App:** -Visit live demo: https://huggingface.co/spaces/asheeshsrivastava9/QnC -1. Login: quest / crossfire -2. Settings: Enter your OpenAI API key -3. Setup: Upload PDFs or use existing data -4. 
Generate: Create concept cards - ---- - -## ๐Ÿ“ˆ Assignment Completion Summary - -| Assignment | Status | File | Key Features | -|------------|--------|------|--------------| -| **1. Vector DB Basics** | โœ… Complete | `assignment_1_...ipynb` | 39 docs, 14K embeddings, semantic search | -| **2. Advanced RAG** | โœ… Complete | `assignment_2_...ipynb` | 4 techniques, 50-60% cost savings | -| **3a. Gradio Basic** | โœ… Complete | `assignment_3a_...ipynb` | Simple UI, RAG integration | -| **3b. Gradio Advanced** | โœ… Complete | `assignment_3b_...ipynb` | Parameter tuning, advanced UI | -| **Bonus: Production App** | โœ… Deployed | HF Spaces | Security, rate limiting, live demo | - -**All Assignments:** โœ… 100% Complete -**Production Deployment:** โœ… Live on HuggingFace Spaces -**Documentation:** โœ… Comprehensive (4 notebooks + this README) - ---- - -## ๐Ÿ† Portfolio Value - -### **Skills Demonstrated:** -- โœ… RAG system architecture -- โœ… Vector database management -- โœ… Advanced retrieval techniques -- โœ… Cost optimization strategies -- โœ… Gradio UI development -- โœ… Production deployment -- โœ… Security implementation -- โœ… Dependency debugging - -### **Production Capabilities:** -- โœ… Can build complete RAG applications -- โœ… Can deploy to cloud platforms -- โœ… Can implement security features -- โœ… Can optimize for cost and performance -- โœ… Can debug complex dependency issues - ---- - -## ๐Ÿ”— Related Links - -**Live Applications:** -- Enhanced RAG App: https://huggingface.co/spaces/asheeshsrivastava9/QnC -- GitHub Repository: https://github.com/AsheeshSrivastava/aethelgard-concept-generator - -**Documentation:** -- LlamaIndex: https://docs.llamaindex.ai/ -- LanceDB: https://lancedb.com/ -- Gradio: https://gradio.app/ - -**Previous Submissions:** -- Day 2: Text Summarization -- Day 3: Multi-Persona Chatbot -- Day 4: LinkedIn Job Automation -- Day 5: Quest And Crossfire LinkedIn AI App - ---- - -## ๐Ÿ’ญ Reflections - -### **The "Aha!" 
Moment:** -Understanding that RAG is the architecture behind ChatGPT Memory, Claude Projects, and conversation persistence changed how I think about AI applications. It's not magic - it's vector similarity search + context retrieval + LLM synthesis. - -### **Most Valuable Learning:** -**Cost optimization through postprocessing.** Filtering irrelevant chunks BEFORE sending to the LLM can save 50-60% of costs without sacrificing quality. This is critical for production applications. - -### **Biggest Challenge:** -**Dependency hell.** The llama-index ecosystem went through a major namespace migration (0.9.48 โ†’ 0.10.x), requiring 6 coordinated version updates. This taught me the importance of version pinning and systematic debugging. - -### **Production Insight:** -**Security is not optional.** Even in a personal project, implementing rate limiting, API key validation, and proper error handling transforms code from "working" to "production-ready." - ---- - -## ๐ŸŽฏ Next Steps - -**For Learning:** -- โœ… Completed Day 6 โœ… -- โณ Continue to Day 7-14 assignments - -**For Production App:** -- โณ Monitor HuggingFace Spaces deployment -- โณ Test all security features -- โณ Add analytics and monitoring -- โณ Consider advanced features (post history, scheduling) - ---- - -## โš–๏ธ License & Attribution - -**Assignments 1-4:** Educational submissions for OutSkill AI Engineering Bootcamp -**Bonus App:** QUEST AND CROSSFIREโ„ข ยฉ Asheesh Ranjan Srivastava - -**AI Collaboration:** -- Technical implementation: Claude Code (Anthropic) -- Strategic decisions & debugging: Human (Asheesh) -- Learning documentation: Human-AI collaboration - ---- - -## ๐Ÿ“ง Contact - -**Student:** Asheesh Ranjan Srivastava -**Email:** asheeshsrivastava9@gmail.com -**Brand:** QUEST AND CROSSFIREโ„ข -**Philosophy:** *Where chaos becomes clarity. 
Small fixes, big clarity.* โ—‡ - ---- - -**Day 6 Status:** โœ… COMPLETE (4/4 Assignments + Bonus Production App) -**Total Learning:** 67,000+ words of documentation -**Production Deployment:** Live on HuggingFace Spaces -**Ready for Evaluation:** โœ… - ---- - -**Last Updated:** November 3, 2025 -**Version:** 1.0 - Final Submission diff --git a/Asheesh_Ranjan_Srivastava/Day-6/SUBMISSION_README_OLD.md b/Asheesh_Ranjan_Srivastava/Day-6/SUBMISSION_README_OLD.md deleted file mode 100644 index 2a45dbc..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-6/SUBMISSION_README_OLD.md +++ /dev/null @@ -1,398 +0,0 @@ -# ๐Ÿ“ฆ Day 6 Submission - Advanced RAG Application - -**OutSkill AI Engineering Bootcamp 2025** -**Student:** Asheesh Ranjan Srivastava -**Date:** November 3, 2025 -**Status:** โœ… Complete & Portfolio-Ready - ---- - -## ๐ŸŽฏ What I Built - -**Aethelgard Concept Generator** - A production-ready RAG application that generates comprehensive Python learning concept cards using advanced retrieval techniques. - -**Strategic Decision:** Built custom portfolio application instead of generic Assignments 3a/3b tutorials (same time investment, 10x portfolio value). 
- ---- - -## ๐Ÿ“ Submission Contents - -### **๐Ÿ† Main Application** - -#### `aethelgard_concept_generator_enhanced.ipynb` (30KB) -**The complete application with:** -- โœ… Login system (quest/crossfire) -- โœ… Frontend API key management -- โœ… File upload functionality (multiple PDFs) -- โœ… Database initialization with progress tracking -- โœ… Concept generation using advanced RAG -- โœ… Export to markdown with Quest And Crossfire branding - -**How to run:** -```bash -jupyter notebook aethelgard_concept_generator_enhanced.ipynb -# Access at: http://127.0.0.1:7861 or 7862 -``` - ---- - -### **๐Ÿ“š Core Documentation** - -#### `DAY_6_LEARNING_JOURNEY.md` (22KB) โญ **START HERE** -**Comprehensive learning documentation covering:** -- What I learned from Assignment 2 (4 advanced RAG techniques) -- Why I built custom app (strategic analysis) -- Technical implementation details -- Problems solved and iterations (5 major issues) -- Key learnings and reflections -- Portfolio value demonstration - -**Reading time:** ~20 minutes - ---- - -#### `ASSIGNMENT_2_COMPLETE_LEARNING_LOG.md` (41KB) -**Deep dive into Advanced RAG techniques:** -- SimilarityPostprocessor implementation and benefits -- TreeSummarize vs Refine vs Compact comparison -- HuggingFace embeddings setup -- LanceDB vector store configuration -- Complete code examples with explanations - -**Reading time:** ~30 minutes - ---- - -#### `ENHANCED_APP_FEATURES.md` (8.4KB) -**Feature-by-feature documentation:** -- Login system -- API key management -- File upload functionality -- Advanced RAG implementation -- Quest And Crossfire branding -- Export functionality - ---- - -#### `CUSTOM_APP_SUMMARY.md` (9.4KB) -**Strategic rationale:** -- Why custom app > generic assignments -- Comparison table (feature-by-feature) -- Portfolio value analysis -- Time investment justification - ---- - -#### `DEPLOYMENT_GUIDE.md` (16KB) -**Complete deployment strategy:** -- GitHub setup (what to include/exclude) -- Hugging Face 
Spaces deployment -- Bootcamp submission instructions -- Legal considerations -- 3 deployment scenarios - ---- - -### **๐Ÿ› ๏ธ Technical Files** - -#### `requirements.txt` (481 bytes) -All Python dependencies with exact versions. - -#### `.gitignore` (1.5KB) -Security protection (excludes PDFs, API keys, vector database). - -#### `data/README.md` -Instructions for adding Python reference PDFs. - ---- - -### **๐Ÿ› Debug Documentation** - -#### `DEBUG_SUMMARY.md` (5.8KB) -Path configuration issues and NotebookEdit tool failure documentation. - ---- - -### **๐Ÿ“ฆ Generated Files (Examples)** - -#### `concept_variables_m01.md` (4.3KB) -Example generated concept card showing output quality. - ---- - -## โœ… Assignment 2 Techniques - All Implemented - -### **1. SimilarityPostprocessor** โœ… -**In Production Code:** `aethelgard_concept_generator_enhanced.ipynb` Cell 6 -```python -node_postprocessors=[ - SimilarityPostprocessor(similarity_cutoff=0.3) -] -``` -**Benefit:** ~60% cost savings by filtering irrelevant chunks - ---- - -### **2. TreeSummarize** โœ… -**In Production Code:** `aethelgard_concept_generator_enhanced.ipynb` Cell 6 -```python -response_synthesizer=get_response_synthesizer( - response_mode=ResponseMode.TREE_SUMMARIZE -) -``` -**Benefit:** Coherent multi-source responses with hierarchical synthesis - ---- - -### **3. HuggingFace Embeddings** โœ… -**In Production Code:** `aethelgard_concept_generator_enhanced.ipynb` Cell 5 -```python -embed_model = HuggingFaceEmbedding( - model_name="BAAI/bge-small-en-v1.5" -) -``` -**Benefit:** Free embeddings (384D), no API costs - ---- - -### **4. LanceDB Vector Store** โœ… -**In Production Code:** `aethelgard_concept_generator_enhanced.ipynb` Cell 6 -```python -vector_store = LanceDBVectorStore( - uri=app_state.VECTOR_DB_PATH, - table_name="concepts" -) -``` -**Benefit:** Fast sub-second similarity search, disk-based storage - ---- - -## ๐ŸŽจ Bonus Features (Beyond Assignment Requirements) - -1. 
โœ… **Production Authentication** - Login system for security -2. โœ… **Frontend API Key Management** - No hardcoded secrets -3. โœ… **File Upload Functionality** - Upload PDFs directly in UI -4. โœ… **Quest And Crossfire Branding** - Full brand integration -5. โœ… **Export to Markdown** - Download generated concepts -6. โœ… **Multi-tab Interface** - Professional Gradio UI -7. โœ… **Statistics Tracking** - Generation count, timestamps -8. โœ… **Deployment Documentation** - GitHub + Hugging Face ready -9. โœ… **97KB Documentation** - Comprehensive guides -10. โœ… **Portfolio-Ready** - Production code quality - ---- - -## ๐Ÿ“Š Project Statistics - -### **Development:** -- **Time invested:** ~6-8 hours total -- **Iterations:** 5 major problem-solving cycles -- **Documentation:** 8 comprehensive files (~97KB) -- **Lines of code:** ~1,200 lines (notebook) - -### **Technical:** -- **Documents indexed:** 19 Python PDFs -- **Total embeddings:** 14,976 (384D vectors) -- **Vector database:** LanceDB (~6MB index) -- **Similarity threshold:** 0.3 cutoff -- **LLM:** OpenAI GPT-4o-mini (~$0.01-0.02 per concept) - -### **Application:** -- **Total features:** 10 major features -- **UI tabs:** 3 (Settings, Setup, Generate) -- **File upload:** Multiple PDFs supported -- **Export format:** Markdown with Q&C branding - ---- - -## ๐Ÿš€ How to Evaluate This Submission - -### **Option 1: Quick Review (10 minutes)** -1. Read `DAY_6_LEARNING_JOURNEY.md` (START HERE) โญ -2. Open `aethelgard_concept_generator_enhanced.ipynb` -3. Review code structure and comments -4. Check `ENHANCED_APP_FEATURES.md` for feature list - -### **Option 2: Full Review (30 minutes)** -1. Read `DAY_6_LEARNING_JOURNEY.md` (complete overview) -2. Read `ASSIGNMENT_2_COMPLETE_LEARNING_LOG.md` (technical deep dive) -3. Review `CUSTOM_APP_SUMMARY.md` (strategic justification) -4. Open and run the notebook (test the application) -5. 
Review `DEPLOYMENT_GUIDE.md` (production readiness) - -### **Option 3: Hands-On Testing (45 minutes)** -1. Install dependencies: `pip install -r requirements.txt` -2. Run notebook: `jupyter notebook aethelgard_concept_generator_enhanced.ipynb` -3. Login (quest/crossfire) -4. Settings tab: Enter OpenAI API key -5. Setup tab: Upload test PDFs OR use existing data/python folder -6. Setup tab: Initialize database (2-3 min) -7. Generate tab: Create a concept card -8. Export: Download markdown file -9. Review output quality - ---- - -## ๐Ÿ’ก Why This Submission Stands Out - -### **Technical Depth:** -- All 4 Assignment 2 techniques implemented correctly -- Production-ready architecture (not tutorial code) -- Advanced features (auth, upload, export) -- Proper error handling and user feedback - -### **Strategic Thinking:** -- Analyzed assignments, made data-driven decision -- Chose portfolio value over following instructions -- Same time investment, 10x better outcome -- Demonstrates systems thinking - -### **Professional Quality:** -- 97KB comprehensive documentation -- Security best practices (no hardcoded secrets) -- Deployment-ready (.gitignore, guides) -- Quest And Crossfire branding throughout - -### **Learning Demonstrated:** -- Deep understanding of RAG concepts -- Problem-solving documentation (5 major iterations) -- Reflective learning (what worked, what didn't) -- Modern software development (human-AI collaboration) - ---- - -## ๐ŸŽฏ Assignment Completion Verification - -### **Required: Assignment 2 - Advanced RAG** โœ… - -| Technique | Required | Implemented | Location | Working | -|-----------|----------|-------------|----------|---------| -| SimilarityPostprocessor | โœ… | โœ… | Cell 6 | โœ… | -| TreeSummarize | โœ… | โœ… | Cell 6 | โœ… | -| HuggingFace Embeddings | โœ… | โœ… | Cell 5 | โœ… | -| LanceDB Vector Store | โœ… | โœ… | Cell 6 | โœ… | - -**Status:** โœ… All 4 techniques implemented and working - ---- - -### **Optional: Assignments 3a & 3b - Gradio 
Apps** โš ๏ธ STRATEGIC PIVOT - -**Original Plan:** Build 2 generic Gradio apps from tutorials - -**My Decision:** Build 1 custom production app instead - -**Rationale:** -- Assignments 3a/3b: Generic tutorials, no new RAG concepts -- Same time investment (~4 hours) -- 10x more portfolio value -- Demonstrates strategic thinking -- Shows production readiness - -**See:** `CUSTOM_APP_SUMMARY.md` for complete analysis - -**Status:** โœ… Custom app exceeds combined 3a+3b requirements - ---- - -## ๐Ÿ“ž Submission Checklist - -- [x] Assignment 2 techniques implemented (4/4) -- [x] Production application built -- [x] Comprehensive documentation (8 files, 97KB) -- [x] Learning journey documented -- [x] Technical deep dive created -- [x] Deployment guide included -- [x] Strategic rationale explained -- [x] Code quality: Clean, commented, professional -- [x] Security: No hardcoded secrets -- [x] Portfolio-ready: Branding, features, docs -- [x] Git-ready: .gitignore, README, requirements.txt - -**Status:** โœ… ALL REQUIREMENTS MET + EXCEEDED - ---- - -## ๐Ÿ† Portfolio Location - -**Production Version:** `D:\Claude\portfolio\apps\aethelgard-concept-generator\` - -All files copied to portfolio folder with: -- Professional README -- PORTFOLIO_READY.md checklist -- Complete deployment documentation -- Ready for GitHub push + Hugging Face deployment - ---- - -## ๐Ÿ“ง Questions or Feedback? 
- -If you have questions about: -- **Technical implementation:** See `ASSIGNMENT_2_COMPLETE_LEARNING_LOG.md` -- **Strategic decisions:** See `CUSTOM_APP_SUMMARY.md` -- **Deployment:** See `DEPLOYMENT_GUIDE.md` -- **Learning journey:** See `DAY_6_LEARNING_JOURNEY.md` - -**Contact:** Asheesh Ranjan Srivastava -**Email:** asheeshsrivastava9@gmail.com -**Brand:** Quest And Crossfireโ„ข - ---- - -## ๐ŸŽ“ Learning Outcomes Achieved - -### **Technical:** -โœ… Advanced RAG architecture -โœ… Vector database management -โœ… Embedding model selection -โœ… Response synthesis techniques -โœ… Gradio UI development -โœ… File upload handling -โœ… Security best practices - -### **Strategic:** -โœ… Portfolio-first mindset -โœ… Strategic pivoting -โœ… Brand integration -โœ… Deployment planning -โœ… Documentation excellence - -### **Problem-Solving:** -โœ… Path configuration debugging -โœ… Tool limitation adaptation -โœ… Security implementation -โœ… User experience optimization -โœ… Legal awareness - ---- - -## โœจ Final Note - -This submission demonstrates not just **learning RAG techniques**, but **applying them in production**. - -It shows: -- โœ… Technical mastery (all 4 techniques) -- โœ… Strategic thinking (custom app choice) -- โœ… Production awareness (auth, security, deployment) -- โœ… Brand consistency (Quest And Crossfire) -- โœ… Documentation excellence (97KB guides) - -**That's the Quest And Crossfire way:** - -> **โ—‡ Where chaos becomes clarity. 
Small fixes, big clarity.** - ---- - -**Day 6 Status:** โœ… COMPLETE -**Assignment 2:** โœ… COMPLETE (4/4 techniques) -**Custom App:** โœ… PRODUCTION-READY -**Documentation:** โœ… COMPREHENSIVE (97KB) -**Portfolio:** โœ… READY FOR DEPLOYMENT - -**Ready for evaluation.** ๐Ÿš€ - ---- - -**Created:** November 3, 2025 -**Last Updated:** November 3, 2025 -**Version:** 1.0 - Final Submission diff --git a/Asheesh_Ranjan_Srivastava/Day-6/assignment_1_vector_db_basics_SUBMISSION.ipynb b/Asheesh_Ranjan_Srivastava/Day-6/assignment_1_vector_db_basics_SUBMISSION.ipynb deleted file mode 100644 index 52f18a4..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-6/assignment_1_vector_db_basics_SUBMISSION.ipynb +++ /dev/null @@ -1,686 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assignment 1: Vector Database Creation and Retrieval\n", - "## Day 6 Session 2 - RAG Fundamentals\n", - "\n", - "**OBJECTIVE:** Create a vector database from a folder of documents and implement basic retrieval functionality.\n", - "\n", - "**LEARNING GOALS:**\n", - "- Understand document loading with SimpleDirectoryReader\n", - "- Learn vector store setup with LanceDB\n", - "- Implement vector index creation\n", - "- Perform semantic search and retrieval\n", - "\n", - "**DATASET:** Use the data folder in `Day_6/session_2/data/` which contains multiple file types\n", - "\n", - "**INSTRUCTIONS:**\n", - "1. Complete each function by replacing the TODO comments with actual implementation\n", - "2. Run each cell after completing the function to test it\n", - "3. 
The answers can be found in the existing notebooks in the `llamaindex_rag/` folder" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“ Setup: Configure Your API Key (Optional)\n", - "\n", - "**IMPORTANT:** This assignment primarily uses **local embeddings** (no API key required).\n", - "\n", - "However, if you want to use OpenAI or OpenRouter for LLM operations later:\n", - "\n", - "### Option 1: OpenAI API Key\n", - "Get your API key from: https://platform.openai.com/api-keys\n", - "\n", - "### Option 2: OpenRouter API Key (Recommended - cheaper!)\n", - "Get your API key from: https://openrouter.ai/keys\n", - "\n", - "### How to Enter Your API Key:\n", - "Run the cell below and enter your API key when prompted. It will be securely stored for this session." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# API Key Configuration (Optional - for future LLM operations)\n", - "import os\n", - "from getpass import getpass\n", - "\n", - "# Check if API key is already set in environment\n", - "if not os.getenv(\"OPENROUTER_API_KEY\") and not os.getenv(\"OPENAI_API_KEY\"):\n", - " print(\"\\n๐Ÿ”‘ API Key Setup (Optional)\")\n", - " print(\"=\" * 50)\n", - " print(\"This assignment uses LOCAL embeddings (no API key required).\")\n", - " print(\"\\nHowever, you can optionally configure an API key for future LLM operations:\")\n", - " print(\" 1. OpenAI API Key - https://platform.openai.com/api-keys\")\n", - " print(\" 2. 
OpenRouter API Key - https://openrouter.ai/keys (cheaper option)\")\n", - " print(\"\\nPress Enter to skip, or paste your API key below:\")\n", - " \n", - " api_key = getpass(\"API Key (or press Enter to skip): \").strip()\n", - " \n", - " if api_key:\n", - " # Detect which type of key it is\n", - " if api_key.startswith(\"sk-or-\"):\n", - " os.environ[\"OPENROUTER_API_KEY\"] = api_key\n", - " print(\"โœ… OpenRouter API key configured!\")\n", - " elif api_key.startswith(\"sk-\"):\n", - " os.environ[\"OPENAI_API_KEY\"] = api_key\n", - " print(\"โœ… OpenAI API key configured!\")\n", - " else:\n", - " print(\"โš ๏ธ Warning: API key format not recognized. Setting as OPENROUTER_API_KEY.\")\n", - " os.environ[\"OPENROUTER_API_KEY\"] = api_key\n", - " else:\n", - " print(\"โ„น๏ธ Skipping API key setup - using local embeddings only (perfect for this assignment!)\")\n", - "else:\n", - " print(\"โœ… API key already configured in environment\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“š Step 1: Import Required Libraries\n", - "\n", - "**What this does:**\n", - "- Imports LlamaIndex components for document loading, vector storage, and indexing\n", - "- Imports LanceDB for local vector database storage\n", - "- Imports HuggingFace embeddings for converting text to numerical vectors\n", - "\n", - "**Key Libraries:**\n", - "- `SimpleDirectoryReader`: Loads documents from folders\n", - "- `VectorStoreIndex`: Creates searchable index from documents\n", - "- `LanceDBVectorStore`: Local vector database (fast, no API needed)\n", - "- `HuggingFaceEmbedding`: Free, local text embeddings" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Import required libraries\n", - "import os\n", - "from pathlib import Path\n", - "from typing import List\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n", - "from 
llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "\n", - "print(\"โœ… Libraries imported successfully!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## โš™๏ธ Step 2: Configure LlamaIndex Settings\n", - "\n", - "**What this does:**\n", - "- Configures LlamaIndex to use **local embeddings** (no API calls, completely free!)\n", - "- Uses the BAAI/bge-small-en-v1.5 model for converting text to 384-dimensional vectors\n", - "- This model runs on your computer, so no internet connection or API key needed\n", - "\n", - "**Why local embeddings?**\n", - "- โœ… Completely free (no API costs)\n", - "- โœ… Fast (runs on your machine)\n", - "- โœ… Private (your documents never leave your computer)\n", - "- โœ… Good quality for learning and many applications\n", - "\n", - "**Model Details:**\n", - "- BAAI/bge-small-en-v1.5: 384-dimensional embeddings, ~133MB model size\n", - "- First run will download the model (one-time, ~1-2 minutes)\n", - "- Subsequent runs use cached model (instant)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Configure LlamaIndex Settings (Using local embeddings - No API key needed)\n", - "def setup_llamaindex_settings():\n", - " \"\"\"\n", - " Configure LlamaIndex with local embeddings.\n", - " This assignment focuses on vector database operations using free, local models.\n", - " \"\"\"\n", - " # Check for API keys (optional, for future use)\n", - " has_openrouter = bool(os.getenv(\"OPENROUTER_API_KEY\"))\n", - " has_openai = bool(os.getenv(\"OPENAI_API_KEY\"))\n", - " \n", - " if not has_openrouter and not has_openai:\n", - " print(\"โ„น๏ธ No API key configured - that's OK for this assignment!\")\n", - " print(\" This assignment only uses local embeddings for vector operations.\")\n", - " else:\n", - " print(\"โœ… API key found (for optional 
future LLM operations)\")\n", - " \n", - " # Configure local embeddings (no API key required)\n", - " print(\"\\n๐Ÿ”„ Loading local embedding model...\")\n", - " Settings.embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\",\n", - " trust_remote_code=True\n", - " )\n", - " \n", - " print(\"โœ… LlamaIndex configured with local embeddings\")\n", - " print(\" Using BAAI/bge-small-en-v1.5 for document embeddings (384 dimensions)\")\n", - " print(\" First run may take 1-2 minutes to download model (~133MB)\")\n", - "\n", - "# Setup the configuration\n", - "setup_llamaindex_settings()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“‚ Function 1: Load Documents from Folder\n", - "\n", - "**Your Task:** Complete the `load_documents_from_folder()` function below.\n", - "\n", - "**What this function does:**\n", - "- Takes a folder path as input\n", - "- Uses `SimpleDirectoryReader` to automatically detect and load various file types\n", - "- Supports: PDFs, text files, Word docs, HTML, CSVs, and more\n", - "- Returns a list of Document objects that can be indexed\n", - "\n", - "**Key Concept - Document Loading:**\n", - "Document ingestion is the first step in any RAG system. We need to load various file types (PDFs, text, HTML, etc.) 
into memory before we can create embeddings and search them.\n", - "\n", - "**Parameters:**\n", - "- `input_dir`: Path to the folder containing documents\n", - "- `recursive=True`: Also load files from subdirectories\n", - "\n", - "**TODO:** Replace the `pass` statement with your implementation using `SimpleDirectoryReader`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def load_documents_from_folder(folder_path: str):\n", - " \"\"\"\n", - " Load documents from a folder using SimpleDirectoryReader.\n", - " \n", - " TODO: Complete this function to load documents from the given folder path.\n", - " HINT: Use SimpleDirectoryReader with recursive parameter to load all files\n", - " \n", - " Args:\n", - " folder_path (str): Path to the folder containing documents\n", - " \n", - " Returns:\n", - " List of documents loaded from the folder\n", - " \"\"\"\n", - " # TODO: Your code here\n", - " # Create SimpleDirectoryReader instance with recursive loading\n", - " # Load and return documents\n", - " pass\n", - "\n", - "# Test the function after you complete it\n", - "test_folder = \"data\"\n", - "documents = load_documents_from_folder(test_folder)\n", - "print(f\"Loaded {len(documents)} documents\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“– Understanding Vector Stores and Embeddings\n", - "\n", - "Before we create the vector store, let's understand some key concepts:\n", - "\n", - "### ๐Ÿ—๏ธ 1. What is an \"Instance\"?\n", - "\n", - "**Instance** = A working copy of an object\n", - "\n", - "Think of it like:\n", - "- **Class** (LanceDBVectorStore) = Blueprint for a house\n", - "- **Instance** (vector_store) = An actual house built from that blueprint\n", - "\n", - "```python\n", - "vector_store = LanceDBVectorStore(...) 
# Creating an instance\n", - "```\n", - "\n", - "Now `vector_store` is a working object you can use:\n", - "- `vector_store.add()` - Add documents\n", - "- `vector_store.query()` - Search documents\n", - "\n", - "---\n", - "\n", - "### ๐Ÿ—‚๏ธ 2. Where is the Database Created?\n", - "\n", - "When you specify `db_path = \"./vectordb\"`, it means:\n", - "- `./` = Current working directory (where Jupyter is running)\n", - "- Since you're in `D:\\Claude\\Bootcamp\\Day- 6\\`, the database will be created at:\n", - " - `D:\\Claude\\Bootcamp\\Day- 6\\vectordb\\`\n", - "\n", - "After running, you'll see a new folder with files like:\n", - "- `documents.lance` (the actual database)\n", - "- Index files\n", - "\n", - "---\n", - "\n", - "### ๐Ÿง  3. What are Document Embeddings? (MOST IMPORTANT!)\n", - "\n", - "**Embeddings** = Converting text to numbers that capture meaning\n", - "\n", - "**Example:**\n", - "\n", - "Original Documents (text):\n", - "- Doc 1: \"Python is a programming language\"\n", - "- Doc 2: \"JavaScript is used for web development\"\n", - "- Doc 3: \"I love cooking pasta\"\n", - "\n", - "Document Embeddings (numbers):\n", - "- Doc 1: `[0.8, 0.9, 0.1, 0.05, ...]` (384 numbers)\n", - "- Doc 2: `[0.75, 0.85, 0.15, 0.1, ...]` (384 numbers)\n", - "- Doc 3: `[0.1, 0.05, 0.9, 0.95, ...]` (384 numbers)\n", - "\n", - "**Why numbers?**\n", - "- Computers can't understand \"Python\" or \"programming\"\n", - "- BUT computers CAN measure distance between numbers!\n", - "- Similar meanings โ†’ Similar numbers\n", - "\n", - "---\n", - "\n", - "### ๐Ÿ“Š 4. Storing Documents vs Storing Embeddings\n", - "\n", - "**Traditional Search (Keyword Matching):**\n", - "```\n", - "Database stores: \"Python is a programming language\"\n", - "Search: \"coding languages\"\n", - "Result: โŒ NO MATCH (\"coding\" โ‰  \"programming\")\n", - "```\n", - "\n", - "**Vector Search (Semantic/Meaning-Based):**\n", - "```\n", - "Database stores: [0.8, 0.9, 0.1, ...] 
โ† Python doc embedding\n", - "Your query: \"coding languages\" โ†’ [0.78, 0.88, 0.12, ...]\n", - "Computer calculates: Distance = 0.05 (VERY CLOSE!)\n", - "Result: โœ… Returns Python doc (understands \"coding\" โ‰ˆ \"programming\")\n", - "```\n", - "\n", - "---\n", - "\n", - "### ๐ŸŽฏ The Magic of Embeddings:\n", - "\n", - "Embeddings understand **MEANING**, not just words:\n", - "\n", - "| Your Search | Traditional DB | Embedding DB |\n", - "|-------------|----------------|-------------|\n", - "| \"king\" | โŒ No match for \"queen\" | โœ… Finds \"queen\" (similar concept) |\n", - "| \"happy\" | โŒ No match for \"joyful\" | โœ… Finds \"joyful\" (same sentiment) |\n", - "| \"Python tutorial\" | โŒ No match for \"learn programming\" | โœ… Finds \"learn programming\" (same intent) |\n", - "\n", - "---\n", - "\n", - "### ๐Ÿ” Why RAG Uses Embeddings:\n", - "\n", - "When you ask: **\"How do AI agents work?\"**\n", - "\n", - "RAG system:\n", - "1. Converts your question to embedding: `[0.6, 0.7, 0.3, ...]`\n", - "2. Compares to ALL document embeddings in database\n", - "3. Finds documents with similar embeddings (similar meaning)\n", - "4. Returns: \"AI_Agent_Frameworks.pdf\" (even if it never says \"how do they work\")\n", - "\n", - "**That's the \"Retrieval\" in Retrieval-Augmented Generation!**" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ—„๏ธ Function 2: Create Vector Store\n", - "\n", - "**Your Task:** Complete the `create_vector_store()` function below.\n", - "\n", - "**What this function does:**\n", - "- Creates a local LanceDB vector database\n", - "- LanceDB stores document embeddings (numerical vectors) on your disk\n", - "- No API calls needed - everything runs locally\n", - "\n", - "**Key Concept - Vector Store:**\n", - "A vector store is a specialized database optimized for storing and searching high-dimensional vectors (embeddings). 
Unlike traditional databases that search by exact matches, vector databases find similar vectors using distance calculations.\n", - "\n", - "**Parameters:**\n", - "- `uri`: Path where the database files will be stored\n", - "- `table_name`: Name of the table to store document vectors (like a table in SQL)\n", - "\n", - "**Why LanceDB?**\n", - "- โœ… Works completely offline (no API calls)\n", - "- โœ… Fast similarity search\n", - "- โœ… Lightweight (~few MB for typical document collections)\n", - "- โœ… Perfect for learning and small-to-medium projects\n", - "\n", - "**TODO:** Complete the function by creating a `LanceDBVectorStore` instance" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def create_vector_store(db_path: str = \"./vectordb\", table_name: str = \"documents\"):\n", - " \"\"\"\n", - " Create a LanceDB vector store for storing document embeddings.\n", - " \n", - " TODO: Complete this function to create a LanceDB vector store.\n", - " HINT: Create the directory first, then instantiate LanceDBVectorStore with uri and table_name\n", - " \n", - " Args:\n", - " db_path (str): Path where the vector database will be stored\n", - " table_name (str): Name of the table in the vector database\n", - " \n", - " Returns:\n", - " LanceDBVectorStore: Configured vector store\n", - " \"\"\"\n", - " # TODO: Your code here\n", - " # Create the directory if it doesn't exist (use Path from pathlib)\n", - " # Create and return LanceDBVectorStore instance\n", - " pass\n", - "\n", - "# Test the function after you complete it\n", - "vector_store = create_vector_store(\"./assignment_vectordb\")\n", - "print(f\"Vector store created: {vector_store is not None}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ”— Function 3: Create Vector Index\n", - "\n", - "**Your Task:** Complete the `create_vector_index()` function below.\n", - "\n", - "**What this function 
does:**\n", - "- Takes your loaded documents and the vector store\n", - "- Creates embeddings for ALL documents (converts text to 384-dimensional vectors)\n", - "- Stores these embeddings in the vector database\n", - "- Returns an index that can be used for searching\n", - "\n", - "**Key Concept - Vector Index:**\n", - "The vector index is the searchable structure that connects your original documents with their embeddings. When you search, the index:\n", - "1. Converts your query to an embedding\n", - "2. Finds the closest document embeddings in the vector store\n", - "3. Returns the original document text\n", - "\n", - "**What happens during index creation:**\n", - "1. For each document โ†’ Generate embedding using BAAI/bge-small-en-v1.5\n", - "2. Store embedding in LanceDB vector store\n", - "3. Create searchable index structure\n", - "\n", - "**Time taken:**\n", - "- ~1-2 seconds per document (first time)\n", - "- For 39 documents โ‰ˆ 30-60 seconds\n", - "- Subsequent runs faster (embeddings cached)\n", - "\n", - "**TODO:** Complete the function by:\n", - "1. Creating a StorageContext with the vector store\n", - "2. 
Creating a VectorStoreIndex from documents using that storage context" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def create_vector_index(documents: List, vector_store):\n", - " \"\"\"\n", - " Create a vector index from documents using the provided vector store.\n", - " \n", - " TODO: Complete this function to create a searchable vector index.\n", - " HINT: Create StorageContext first, then use VectorStoreIndex.from_documents()\n", - " \n", - " Args:\n", - " documents: List of documents to index\n", - " vector_store: LanceDB vector store to use for storage\n", - " \n", - " Returns:\n", - " VectorStoreIndex: The created vector index\n", - " \"\"\"\n", - " # TODO: Your code here\n", - " # Create storage context with vector store\n", - " # Create index from documents\n", - " # This will: 1) Generate embeddings for all documents\n", - " # 2) Store embeddings in the vector store\n", - " pass\n", - "\n", - "# Test the function after you complete it\n", - "if documents and vector_store:\n", - " index = create_vector_index(documents, vector_store)\n", - " print(f\"Vector index created: {index is not None}\")\n", - " print(f\"Indexed {len(documents)} documents successfully!\")\n", - "else:\n", - " print(\"Complete previous functions first to test this one\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ” Function 4: Search Documents\n", - "\n", - "**Your Task:** Complete the `search_documents()` function below.\n", - "\n", - "**What this function does:**\n", - "- Takes a search query (plain English text)\n", - "- Converts query to an embedding\n", - "- Finds the most similar document embeddings in the vector store\n", - "- Returns the actual document text (not the embeddings)\n", - "\n", - "**Key Concept - Semantic Search:**\n", - "Unlike keyword search (exact word matching), semantic search finds documents with similar **meaning**:\n", - "- Query: 
\"machine learning tutorials\" โ†’ Finds: \"AI and deep learning guides\"\n", - "- Query: \"Italian food recipes\" โ†’ Finds: \"Cooking pasta and pizza\"\n", - "- Query: \"financial analysis\" โ†’ Finds: \"Investment and stock market data\"\n", - "\n", - "**How it works:**\n", - "1. Query \"What are AI agents?\" โ†’ Embedding: `[0.65, 0.73, 0.32, ...]`\n", - "2. Compare to all document embeddings using distance calculation\n", - "3. Find closest matches:\n", - " - Document A: Distance = 0.08 (VERY SIMILAR) โœ…\n", - " - Document B: Distance = 0.15 (SIMILAR) โœ…\n", - " - Document C: Distance = 0.89 (NOT SIMILAR) โŒ\n", - "4. Return top-k closest documents (e.g., top 3)\n", - "\n", - "**Parameters:**\n", - "- `similarity_top_k`: How many results to return (e.g., 3 means \"return 3 most similar documents\")\n", - "\n", - "**TODO:** Complete the function by:\n", - "1. Creating a retriever from the index with similarity_top_k parameter\n", - "2. Using the retriever to search for the query" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def search_documents(index, query: str, top_k: int = 3):\n", - " \"\"\"\n", - " Search for relevant documents using the vector index.\n", - " \n", - " TODO: Complete this function to perform semantic search on the index.\n", - " HINT: Use index.as_retriever() with similarity_top_k parameter, then retrieve(query)\n", - " \n", - " Args:\n", - " index: Vector index to search\n", - " query (str): Search query\n", - " top_k (int): Number of top results to return\n", - " \n", - " Returns:\n", - " List of retrieved document nodes\n", - " \"\"\"\n", - " # TODO: Your code here\n", - " # Create retriever from index with similarity_top_k\n", - " # Retrieve documents for the query\n", - " pass\n", - "\n", - "# Test the function after you complete it\n", - "if 'index' in locals() and index is not None:\n", - " test_query = \"What are AI agents?\"\n", - " results = 
search_documents(index, test_query, top_k=2)\n", - " print(f\"Found {len(results)} results for query: '{test_query}'\")\n", - " print(\"\\n๐Ÿ”Ž Search Results:\")\n", - " for i, result in enumerate(results, 1):\n", - " text_preview = result.text[:100] if hasattr(result, 'text') else 'No text'\n", - " score = f\" (Similarity: {result.score:.4f})\" if hasattr(result, 'score') else \"\"\n", - " print(f\" {i}. {text_preview}...{score}\")\n", - "else:\n", - " print(\"Complete all previous functions first to test this one\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿš€ Final Test: Complete RAG Pipeline\n", - "\n", - "**What this cell does:**\n", - "Once you've completed all 4 functions above, this cell will:\n", - "1. Run the complete vector database pipeline from start to finish\n", - "2. Test with multiple diverse search queries\n", - "3. Show similarity scores for each result\n", - "4. Verify that all components work together\n", - "\n", - "**Test Queries:**\n", - "We'll test with 4 different topics to demonstrate semantic search:\n", - "- AI and technology\n", - "- Agent evaluation\n", - "- Cooking and recipes\n", - "- Financial analysis\n", - "\n", - "This proves your vector database can handle diverse topics and find relevant results!\n", - "\n", - "**What to look for:**\n", - "- โœ… All 4 functions complete successfully\n", - "- โœ… Documents load (should see ~39 documents)\n", - "- โœ… Vector store and index created\n", - "- โœ… Search returns relevant results with similarity scores\n", - "- โœ… Higher scores (closer to 1.0) = more similar documents" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Final test of the complete pipeline\n", - "print(\"๐Ÿš€ Testing Complete Vector Database Pipeline\")\n", - "print(\"=\" * 50)\n", - "\n", - "# Re-run the complete pipeline to ensure everything works\n", - "data_folder = \"data\"\n", - "vector_db_path = 
\"./assignment_vectordb\"\n", - "\n", - "# Step 1: Load documents\n", - "print(\"\\n๐Ÿ“‚ Step 1: Loading documents...\")\n", - "documents = load_documents_from_folder(data_folder)\n", - "print(f\" Loaded {len(documents)} documents\")\n", - "\n", - "# Step 2: Create vector store\n", - "print(\"\\n๐Ÿ—„๏ธ Step 2: Creating vector store...\")\n", - "vector_store = create_vector_store(vector_db_path)\n", - "print(\" Vector store status:\", \"โœ… Created\" if vector_store else \"โŒ Failed\")\n", - "\n", - "# Step 3: Create vector index\n", - "print(\"\\n๐Ÿ”— Step 3: Creating vector index...\")\n", - "print(\" (This may take 30-60 seconds for ~39 documents...)\")\n", - "if documents and vector_store:\n", - " index = create_vector_index(documents, vector_store)\n", - " print(\" Index status:\", \"โœ… Created\" if index else \"โŒ Failed\")\n", - "else:\n", - " index = None\n", - " print(\" โŒ Cannot create index - missing documents or vector store\")\n", - "\n", - "# Step 4: Test multiple search queries\n", - "print(\"\\n๐Ÿ” Step 4: Testing search functionality...\")\n", - "if index:\n", - " search_queries = [\n", - " \"What are AI agents?\",\n", - " \"How to evaluate agent performance?\", \n", - " \"Italian recipes and cooking\",\n", - " \"Financial analysis and investment\"\n", - " ]\n", - " \n", - " for query in search_queries:\n", - " print(f\"\\n ๐Ÿ”Ž Query: '{query}'\")\n", - " results = search_documents(index, query, top_k=2)\n", - " \n", - " if results:\n", - " for i, result in enumerate(results, 1):\n", - " text_preview = result.text[:100] if hasattr(result, 'text') else \"No text available\"\n", - " score = f\" (Score: {result.score:.4f})\" if hasattr(result, 'score') else \"\"\n", - " print(f\" {i}. 
{text_preview}...{score}\")\n", - " else:\n", - " print(\" No results found\")\n", - "else:\n", - " print(\" โŒ Cannot test search - index not created\")\n", - "\n", - "print(\"\\n\" + \"=\" * 50)\n", - "print(\"๐ŸŽฏ Assignment Status:\")\n", - "print(f\" Documents loaded: {'โœ…' if documents else 'โŒ'}\")\n", - "print(f\" Vector store created: {'โœ…' if vector_store else 'โŒ'}\")\n", - "print(f\" Index created: {'โœ…' if index else 'โŒ'}\")\n", - "print(f\" Search working: {'โœ…' if index else 'โŒ'}\")\n", - "\n", - "if documents and vector_store and index:\n", - " print(\"\\n๐ŸŽ‰ Congratulations! You've successfully completed the assignment!\")\n", - " print(\" You've built a complete vector database with semantic search functionality!\")\n", - " print(\"\\n๐Ÿ“š What you learned:\")\n", - " print(\" โœ… Document loading from folders\")\n", - " print(\" โœ… Vector store setup with LanceDB\")\n", - " print(\" โœ… Document embedding and indexing\")\n", - " print(\" โœ… Semantic search (meaning-based, not keyword-based)\")\n", - " print(\"\\n๐Ÿš€ You're ready for Assignment 2: Advanced RAG techniques!\")\n", - "else:\n", - " print(\"\\n๐Ÿ“ Please complete the TODO functions above to finish the assignment.\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "bootcamp", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Asheesh_Ranjan_Srivastava/Day-6/assignment_2_advanced_rag_SUBMISSION.ipynb b/Asheesh_Ranjan_Srivastava/Day-6/assignment_2_advanced_rag_SUBMISSION.ipynb deleted file mode 100644 index d4c3d8c..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-6/assignment_2_advanced_rag_SUBMISSION.ipynb +++ /dev/null @@ -1,938 +0,0 @@ -{ - 
"cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assignment 2: Advanced RAG Techniques\n", - "## Day 6 Session 2 - Advanced RAG Fundamentals\n", - "\n", - "**OBJECTIVE:** Implement advanced RAG techniques including postprocessors, response synthesizers, and structured outputs.\n", - "\n", - "**LEARNING GOALS:**\n", - "- Understand and implement node postprocessors for filtering and reranking\n", - "- Learn different response synthesis strategies (TreeSummarize, Refine)\n", - "- Create structured outputs using Pydantic models\n", - "- Build advanced retrieval pipelines with multiple processing stages\n", - "\n", - "**DATASET:** Use the same data folder as Assignment 1 (`data/`)\n", - "\n", - "**PREREQUISITES:** Complete Assignment 1 first\n", - "\n", - "**INSTRUCTIONS:**\n", - "1. Configure your OpenAI API key when prompted\n", - "2. Run each cell in order\n", - "3. Each technique builds on the previous one\n", - "4. Functions are already implemented - focus on understanding the concepts" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ”‘ Setup: Configure Your OpenAI API Key\n", - "\n", - "**REQUIRED for this assignment:** Advanced RAG techniques use LLM operations that require an API key.\n", - "\n", - "### Get Your API Key:\n", - "1. Go to: https://platform.openai.com/api-keys\n", - "2. Sign up or log in\n", - "3. Create a new API key\n", - "4. Copy the key (starts with `sk-proj-...` or `sk-...`)\n", - "\n", - "### Cost Estimate:\n", - "- Model: GPT-4o-mini (~$0.15 per 1M input tokens, ~$0.60 per 1M output tokens)\n", - "- This assignment: ~10-20 queries ร— ~500 tokens each = **$0.01 - $0.02 total cost**\n", - "- Very affordable for learning!\n", - "\n", - "### How to Enter Your API Key:\n", - "Run the cell below and paste your API key when prompted. It will be securely stored for this session only." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# OpenAI API Key Configuration (REQUIRED)\n", - "import os\n", - "from getpass import getpass\n", - "\n", - "# Check if API key is already set in environment\n", - "if not os.getenv(\"OPENAI_API_KEY\"):\n", - " print(\"\\n๐Ÿ”‘ OpenAI API Key Required\")\n", - " print(\"=\" * 50)\n", - " print(\"This assignment uses OpenAI GPT-4o-mini for LLM operations.\")\n", - " print(\"\\nGet your API key from: https://platform.openai.com/api-keys\")\n", - " print(\"Expected cost: ~$0.01-0.02 for this entire assignment\\n\")\n", - " \n", - " api_key = getpass(\"Paste your OpenAI API key: \").strip()\n", - " \n", - " if api_key:\n", - " os.environ[\"OPENAI_API_KEY\"] = api_key\n", - " print(\"\\nโœ… OpenAI API key configured successfully!\")\n", - " print(\" You're ready for advanced RAG operations.\")\n", - " else:\n", - " print(\"\\nโš ๏ธ No API key entered. LLM operations will fail.\")\n", - " print(\" Please run this cell again and enter your API key.\")\n", - "else:\n", - " print(\"โœ… OpenAI API key already configured in environment\")\n", - " print(\" Ready for advanced RAG operations!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“š Step 1: Import Advanced RAG Libraries\n", - "\n", - "**What this does:**\n", - "- Imports all necessary components for advanced RAG techniques\n", - "- Includes postprocessors, response synthesizers, and output parsers\n", - "- Imports Pydantic for structured outputs\n", - "\n", - "**New Components (vs Assignment 1):**\n", - "- `SimilarityPostprocessor`: Filters low-quality results\n", - "- `TreeSummarize`, `Refine`: Different ways to synthesize answers\n", - "- `PydanticOutputParser`: Creates structured, validated outputs\n", - "- `OpenAI`: LLM integration for generating responses" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - 
"source": [ - "# Import required libraries for advanced RAG\n", - "import os\n", - "from pathlib import Path\n", - "from typing import Dict, List, Optional, Any\n", - "from pydantic import BaseModel, Field\n", - "\n", - "# Core LlamaIndex components\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n", - "from llama_index.core.query_engine import RetrieverQueryEngine\n", - "from llama_index.core.retrievers import VectorIndexRetriever\n", - "\n", - "# Vector store\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "\n", - "# Embeddings and LLM (Using OpenAI)\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "from llama_index.llms.openai import OpenAI\n", - "\n", - "# Advanced RAG components\n", - "from llama_index.core.postprocessor import SimilarityPostprocessor\n", - "from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n", - "from llama_index.core.output_parsers import PydanticOutputParser\n", - "from llama_index.core.program import LLMTextCompletionProgram\n", - "\n", - "print(\"โœ… Advanced RAG libraries imported successfully!\")\n", - "print(\" Using OpenAI for LLM operations\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## โš™๏ธ Step 2: Configure Advanced RAG Settings\n", - "\n", - "**What this does:**\n", - "- Configures OpenAI GPT-4o-mini as the LLM (for generating responses)\n", - "- Uses local HuggingFace embeddings (same as Assignment 1, free!)\n", - "- Sets optimized chunk size for better precision\n", - "\n", - "**Why GPT-4o-mini?**\n", - "- โœ… Cost-effective (~10x cheaper than GPT-4)\n", - "- โœ… Fast responses (~1-2 seconds)\n", - "- โœ… Good quality for learning and many applications\n", - "- โœ… Perfect for this assignment (~$0.01-0.02 total)\n", - "\n", - "**Temperature = 0.1:**\n", - "- Low temperature = More consistent, focused responses\n", - "- Good for 
factual RAG applications\n", - "- Less creative randomness\n", - "\n", - "**Chunk Size = 512:**\n", - "- Smaller chunks = Better precision (find exact relevant parts)\n", - "- Assignment 1 used default (~1024)\n", - "- 512 is optimized for detailed retrieval" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Configure Advanced RAG Settings (Using OpenAI)\n", - "def setup_advanced_rag_settings():\n", - " \"\"\"\n", - " Configure LlamaIndex with optimized settings for advanced RAG.\n", - " Uses local embeddings and OpenAI for LLM operations.\n", - " \"\"\"\n", - " # Check for OpenAI API key\n", - " api_key = os.getenv(\"OPENAI_API_KEY\")\n", - " if not api_key:\n", - " print(\"โš ๏ธ OPENAI_API_KEY not found!\")\n", - " print(\" Please run the API key configuration cell above.\")\n", - " print(\" LLM operations will fail without an API key.\")\n", - " return False\n", - " \n", - " print(\"โœ… OpenAI API key found - configuring advanced RAG...\")\n", - " \n", - " # Configure OpenAI LLM\n", - " Settings.llm = OpenAI(\n", - " api_key=api_key,\n", - " model=\"gpt-4o-mini\", # Cost-effective model for learning\n", - " temperature=0.1 # Lower temperature for more consistent responses\n", - " )\n", - " print(\" Using model: gpt-4o-mini (cost-optimized)\")\n", - " print(\" Temperature: 0.1 (consistent, factual responses)\")\n", - " \n", - " # Configure local embeddings (no API key required, same as Assignment 1)\n", - " print(\"\\n๐Ÿ”„ Loading local embedding model...\")\n", - " Settings.embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\",\n", - " trust_remote_code=True\n", - " )\n", - " \n", - " # Advanced RAG configuration\n", - " Settings.chunk_size = 512 # Smaller chunks for better precision\n", - " Settings.chunk_overlap = 50\n", - " \n", - " print(\"โœ… Advanced RAG settings configured successfully!\")\n", - " print(\" - Chunk size: 512 (optimized for precision)\")\n", - " 
print(\" - Chunk overlap: 50 (maintains context across chunks)\")\n", - " print(\" - Using local embeddings (free, 384 dimensions)\")\n", - " print(\" - OpenAI LLM ready for response synthesis\")\n", - " return True\n", - "\n", - "# Setup the configuration\n", - "config_success = setup_advanced_rag_settings()\n", - "\n", - "if not config_success:\n", - " print(\"\\nโŒ Configuration failed. Please configure API key above and retry.\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“‚ Step 3: Create Basic Index (Reuse from Assignment 1)\n", - "\n", - "**What this does:**\n", - "- Creates the foundational vector index that we'll enhance with advanced techniques\n", - "- Reuses the same concepts from Assignment 1 (document loading, vector store, indexing)\n", - "- Creates a separate database (`advanced_rag_vectordb`) so it doesn't conflict with Assignment 1\n", - "\n", - "**Why a separate database?**\n", - "- Assignment 1 database: `./assignment_vectordb/`\n", - "- Assignment 2 database: `./advanced_rag_vectordb/`\n", - "- Keeps assignments independent\n", - "- Uses optimized chunk size (512 vs default)\n", - "\n", - "**This is the foundation** - Advanced techniques in the following cells will enhance this basic index with:\n", - "- Similarity filtering\n", - "- Better response synthesis\n", - "- Structured outputs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Setup: Create index from Assignment 1 (reuse the basic functionality)\n", - "def setup_basic_index(data_folder: str = \"data\", force_rebuild: bool = False):\n", - " \"\"\"\n", - " Create a basic vector index that we'll enhance with advanced techniques.\n", - " This reuses the concepts from Assignment 1.\n", - " \"\"\"\n", - " # Create vector store\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./advanced_rag_vectordb\",\n", - " table_name=\"documents\"\n", - " )\n", - " \n", - " # Load 
documents\n", - " if not Path(data_folder).exists():\n", - " print(f\"โŒ Data folder not found: {data_folder}\")\n", - " print(\" Make sure you're in the correct directory with the 'data' folder.\")\n", - " return None\n", - " \n", - " print(f\"๐Ÿ“‚ Loading documents from: {data_folder}\")\n", - " reader = SimpleDirectoryReader(input_dir=data_folder, recursive=True)\n", - " documents = reader.load_data()\n", - " print(f\" Loaded {len(documents)} documents\")\n", - " \n", - " # Create storage context and index\n", - " print(\"\\n๐Ÿ”— Creating vector index...\")\n", - " print(\" (This may take 30-60 seconds for ~39 documents...)\")\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context,\n", - " show_progress=True\n", - " )\n", - " \n", - " print(f\"\\nโœ… Basic index created with {len(documents)} documents\")\n", - " print(\" Ready for advanced RAG techniques!\")\n", - " return index\n", - "\n", - "# Create the basic index\n", - "print(\"๐Ÿš€ Setting up basic index for advanced RAG...\")\n", - "print(\"=\" * 50)\n", - "index = setup_basic_index()\n", - "\n", - "if index:\n", - " print(\"\\n\" + \"=\" * 50)\n", - " print(\"โœ… Ready to implement advanced RAG techniques!\")\n", - " print(\" The following cells will add:\")\n", - " print(\" 1. Similarity filtering (remove irrelevant results)\")\n", - " print(\" 2. TreeSummarize (better response synthesis)\")\n", - " print(\" 3. Structured outputs (Pydantic models)\")\n", - " print(\" 4. 
Combined advanced pipeline\")\n", - "else:\n", - " print(\"\\nโŒ Failed to create index - check data folder path\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐ŸŽฏ Technique 1: Similarity Filtering (Postprocessor)\n", - "\n", - "**What this technique does:**\n", - "- Filters out retrieved chunks that score below a relevance threshold\n", - "- Improves response quality by removing \"noise\"\n", - "- Reduces API costs (fewer tokens sent to LLM)\n", - "\n", - "**Key Concept - Postprocessors:**\n", - "Postprocessors refine retrieval results **after** the initial vector search but **before** sending to the LLM. Think of it as a quality control step.\n", - "\n", - "**How Similarity Filtering Works:**\n", - "1. Vector search retrieves top 10 chunks\n", - "2. Each chunk has a similarity score (0.0 to 1.0)\n", - "3. SimilarityPostprocessor filters out chunks below threshold (e.g., 0.3)\n", - "4. Only high-quality chunks (score โ‰ฅ 0.3) go to the LLM\n", - "\n", - "**Example:**\n", - "```\n", - "Query: \"AI agent architectures\"\n", - "\n", - "Initial retrieval (10 chunks):\n", - "- Chunk 1: Score 0.85 โœ… (about AI agents - VERY RELEVANT)\n", - "- Chunk 2: Score 0.72 โœ… (about agent frameworks - RELEVANT)\n", - "- Chunk 3: Score 0.65 โœ… (about system design - RELEVANT)\n", - "- Chunk 4: Score 0.28 โŒ (about cooking recipes - NOT RELEVANT)\n", - "- Chunk 5: Score 0.15 โŒ (about finance - NOT RELEVANT)\n", - "- ... 
(5 more low-scoring chunks)\n", - "\n", - "After SimilarityPostprocessor (cutoff=0.3):\n", - "- Only Chunks 1, 2, 3 passed (scores โ‰ฅ 0.3)\n", - "- Result: Cleaner context for LLM, better answers\n", - "```\n", - "\n", - "**Why it matters:**\n", - "- โœ… Removes irrelevant results that confuse the LLM\n", - "- โœ… Reduces API costs (fewer tokens)\n", - "- โœ… Improves answer quality and focus\n", - "- โœ… Typical cutoff: 0.3 (adjustable based on your needs)\n", - "\n", - "**Parameters:**\n", - "- `similarity_cutoff`: Minimum score (0.0-1.0). Common: 0.3-0.5\n", - "- `top_k`: How many chunks to retrieve initially (before filtering)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def create_query_engine_with_similarity_filter(index, similarity_cutoff: float = 0.3, top_k: int = 10):\n", - " \"\"\"\n", - " Create a query engine that filters results based on similarity scores.\n", - " \n", - " Args:\n", - " index: Vector index to query\n", - " similarity_cutoff: Minimum similarity score (0.0 to 1.0)\n", - " top_k: Number of initial results to retrieve before filtering\n", - " \n", - " Returns:\n", - " Query engine with similarity filtering\n", - " \"\"\"\n", - " # Create similarity postprocessor with the cutoff threshold\n", - " similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)\n", - " \n", - " # Create query engine with similarity filtering\n", - " query_engine = index.as_query_engine(\n", - " similarity_top_k=top_k,\n", - " node_postprocessors=[similarity_processor]\n", - " )\n", - " \n", - " return query_engine\n", - "\n", - "# Test the function\n", - "if index:\n", - " print(\"๐Ÿ”ง Creating query engine with similarity filtering...\")\n", - " filtered_engine = create_query_engine_with_similarity_filter(index, similarity_cutoff=0.3)\n", - " \n", - " if filtered_engine:\n", - " print(\"โœ… Query engine with similarity filtering created\")\n", - " print(\" Settings: 
Retrieve 10, filter out scores < 0.3\")\n", - " \n", - " # Test query\n", - " test_query = \"What are the benefits of AI agents?\"\n", - " print(f\"\\n๐Ÿ” Testing query: '{test_query}'\")\n", - " print(\" (This will make an OpenAI API call - ~$0.001 cost)\\n\")\n", - " \n", - " # Test the response\n", - " response = filtered_engine.query(test_query)\n", - " print(f\"๐Ÿ“ Filtered Response:\\n{response}\")\n", - " \n", - " print(\"\\n๐Ÿ’ก Notice: Only high-quality, relevant chunks were used!\")\n", - " else:\n", - " print(\"โŒ Failed to create filtered query engine\")\n", - "else:\n", - " print(\"โŒ No index available - run previous cells first\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐ŸŒณ Technique 2: TreeSummarize (Response Synthesizer)\n", - "\n", - "**What this technique does:**\n", - "- Changes **how** the LLM combines multiple retrieved chunks into a final answer\n", - "- Uses hierarchical summarization (like building a tree from bottom to top)\n", - "- Better for complex analytical questions\n", - "\n", - "**Key Concept - Response Synthesizers:**\n", - "Response synthesizers control how retrieved information becomes the final answer. Different strategies work better for different query types.\n", - "\n", - "**Available Synthesizers:**\n", - "1. **TreeSummarize** (this cell):\n", - " - Builds response hierarchically\n", - " - Summarizes pairs of chunks, then summarizes summaries\n", - " - Good for: Comprehensive analysis, \"compare X and Y\", long responses\n", - "\n", - "2. **Refine** (not shown here):\n", - " - Iteratively improves answer chunk by chunk\n", - " - Good for: Detailed explanations, evolving answers\n", - "\n", - "3. 
**CompactAndRefine** (not shown here):\n", - " - Combines chunks first, then refines\n", - " - Good for: Balance between quality and speed\n", - "\n", - "**How TreeSummarize Works:**\n", - "```\n", - "Retrieved Chunks: [A, B, C, D]\n", - "\n", - "Level 1 (pair summaries):\n", - " Summary_AB = Summarize(A, B)\n", - " Summary_CD = Summarize(C, D)\n", - "\n", - "Level 2 (combine summaries):\n", - " Final_Answer = Summarize(Summary_AB, Summary_CD)\n", - "```\n", - "\n", - "**Example Query Types:**\n", - "- โœ… \"Compare the advantages and disadvantages of X\"\n", - "- โœ… \"Explain the evolution of Y from early to modern\"\n", - "- โœ… \"Analyze the relationship between A and B\"\n", - "- โŒ \"What is X?\" (simple factual - default synthesizer is fine)\n", - "\n", - "**Why it matters:**\n", - "- โœ… More comprehensive answers for complex queries\n", - "- โœ… Better synthesis across multiple sources\n", - "- โœ… Maintains context across many chunks\n", - "- โš ๏ธ Slightly more API calls (but better quality)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def create_query_engine_with_tree_summarize(index, top_k: int = 5):\n", - " \"\"\"\n", - " Create a query engine that uses TreeSummarize for comprehensive responses.\n", - " \n", - " Args:\n", - " index: Vector index to query\n", - " top_k: Number of results to retrieve\n", - " \n", - " Returns:\n", - " Query engine with TreeSummarize synthesis\n", - " \"\"\"\n", - " # Create TreeSummarize response synthesizer\n", - " tree_synthesizer = TreeSummarize()\n", - " \n", - " # Create query engine with the synthesizer\n", - " query_engine = index.as_query_engine(\n", - " similarity_top_k=top_k,\n", - " response_synthesizer=tree_synthesizer\n", - " )\n", - " \n", - " return query_engine\n", - "\n", - "# Test the function\n", - "if index:\n", - " print(\"๐ŸŒณ Creating query engine with TreeSummarize...\")\n", - " tree_engine = 
create_query_engine_with_tree_summarize(index)\n", - " \n", - " if tree_engine:\n", - " print(\"โœ… Query engine with TreeSummarize created\")\n", - " print(\" Best for: Analytical queries, comparisons, comprehensive answers\")\n", - " \n", - " # Test with a complex analytical query\n", - " analytical_query = \"Compare the advantages and disadvantages of different AI agent frameworks\"\n", - " print(f\"\\n๐Ÿ” Testing analytical query: '{analytical_query}'\")\n", - " print(\" (This will make OpenAI API calls for hierarchical summarization)\\n\")\n", - " \n", - " # Test the response\n", - " response = tree_engine.query(analytical_query)\n", - " print(f\"๐Ÿ“ TreeSummarize Response:\\n{response}\")\n", - " \n", - " print(\"\\n๐Ÿ’ก Notice: More comprehensive analysis by building answer hierarchically!\")\n", - " else:\n", - " print(\"โŒ Failed to create TreeSummarize query engine\")\n", - "else:\n", - " print(\"โŒ No index available - run previous cells first\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“Š Technique 3: Structured Outputs (Pydantic Models)\n", - "\n", - "**What this technique does:**\n", - "- Forces LLM to return data in a specific, validated structure\n", - "- Uses Pydantic models to define the exact output format\n", - "- Essential for API endpoints, databases, and data pipelines\n", - "\n", - "**Key Concept - Structured Outputs:**\n", - "Instead of free-text responses, you get type-safe, validated data structures that applications can reliably process.\n", - "\n", - "**Problem with Free-Text Responses:**\n", - "```python\n", - "# Free-text response (unpredictable)\n", - "response = \"AI agents are systems that can reason. Key capabilities include planning, tool use...\"\n", - "\n", - "# How do you extract:\n", - "# - The title?\n", - "# - List of key points? (parsing is error-prone)\n", - "# - Applications? 
(where do they start/end?)\n", - "```\n", - "\n", - "**Solution with Structured Outputs:**\n", - "```python\n", - "# Structured response (predictable)\n", - "response = ResearchPaperInfo(\n", - " title=\"AI Agents and Their Capabilities\",\n", - " key_points=[\"reasoning\", \"planning\", \"tool execution\"],\n", - " applications=[\"autonomous systems\", \"financial analysis\"],\n", - " summary=\"AI agents are autonomous systems...\"\n", - ")\n", - "\n", - "# Easy to use:\n", - "print(response.title) # Direct access\n", - "for point in response.key_points: # Iterate list\n", - " print(point)\n", - "```\n", - "\n", - "**Pydantic Model Example:**\n", - "```python\n", - "class ResearchPaperInfo(BaseModel):\n", - " title: str # Must be a string\n", - " key_points: List[str] # Must be a list of strings\n", - " applications: List[str] # Must be a list of strings\n", - " summary: str # Must be a string\n", - "```\n", - "\n", - "**Why it matters:**\n", - "- โœ… **Predictable outputs** - Always the same structure\n", - "- โœ… **Type safety** - Pydantic validates data types\n", - "- โœ… **Easy integration** - Works with databases, APIs, JSON\n", - "- โœ… **Error prevention** - Catches invalid outputs early\n", - "\n", - "**Use Cases:**\n", - "- REST API endpoints (return JSON)\n", - "- Database inserts (structured records)\n", - "- Data pipelines (consistent format)\n", - "- Frontend applications (predictable data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# First, define the Pydantic model for structured outputs \n", - "class ResearchPaperInfo(BaseModel):\n", - " \"\"\"Structured information about a research paper or AI concept.\"\"\"\n", - " title: str = Field(description=\"The main title or concept name\")\n", - " key_points: List[str] = Field(description=\"3-5 main points or findings\")\n", - " applications: List[str] = Field(description=\"Practical applications or use cases\")\n", - " summary: str 
= Field(description=\"Brief 2-3 sentence summary\")\n", - "\n", - "def create_structured_output_program(output_model: BaseModel = ResearchPaperInfo):\n", - " \"\"\"\n", - " Create a structured output program using Pydantic models.\n", - " \n", - " Args:\n", - " output_model: Pydantic model class for structured output\n", - " \n", - " Returns:\n", - " LLMTextCompletionProgram that returns structured data\n", - " \"\"\"\n", - " # Create output parser with the Pydantic model\n", - " output_parser = PydanticOutputParser(output_cls=output_model)\n", - " \n", - " # Create the structured output program\n", - " prompt_template_str = \"\"\"\n", - " Based on the following context and query, extract structured information.\n", - " \n", - " Context: {context}\n", - " Query: {query}\n", - " \n", - " {format_instructions}\n", - " \"\"\"\n", - " \n", - " program = LLMTextCompletionProgram.from_defaults(\n", - " output_parser=output_parser,\n", - " prompt_template_str=prompt_template_str,\n", - " verbose=True\n", - " )\n", - "\n", - " return program\n", - "\n", - "# Test the function\n", - "if index:\n", - " print(\"๐Ÿ“Š Creating structured output program...\")\n", - " structured_program = create_structured_output_program(ResearchPaperInfo)\n", - " \n", - " if structured_program:\n", - " print(\"โœ… Structured output program created\")\n", - " print(\" Output format: ResearchPaperInfo (Pydantic model)\")\n", - " print(\" Fields: title, key_points, applications, summary\")\n", - " \n", - " # Test with retrieval and structured extraction\n", - " structure_query = \"Tell me about AI agents and their capabilities\"\n", - " print(f\"\\n๐Ÿ” Testing structured query: '{structure_query}'\")\n", - " \n", - " # Get context for structured extraction\n", - " print(\" Step 1: Retrieving relevant context...\")\n", - " retriever = VectorIndexRetriever(index=index, similarity_top_k=3)\n", - " nodes = retriever.retrieve(structure_query)\n", - " context = \"\\n\".join([node.text for node in 
nodes])\n", - " print(f\" Retrieved {len(nodes)} relevant chunks\")\n", - " \n", - " # Generate structured response\n", - " print(\"\\n Step 2: Generating structured output...\")\n", - " print(\" (This will make an OpenAI API call)\\n\")\n", - " response = structured_program(context=context, query=structure_query)\n", - " \n", - " print(f\"๐Ÿ“Š Structured Response:\")\n", - " print(f\"\\n Title: {response.title}\")\n", - " print(f\"\\n Key Points:\")\n", - " for i, point in enumerate(response.key_points, 1):\n", - " print(f\" {i}. {point}\")\n", - " print(f\"\\n Applications:\")\n", - " for i, app in enumerate(response.applications, 1):\n", - " print(f\" {i}. {app}\")\n", - " print(f\"\\n Summary: {response.summary}\")\n", - " \n", - " print(\"\\n๐Ÿ’ก Output format validated:\")\n", - " print(f\" โœ… Type: {type(response).__name__}\")\n", - " print(f\" โœ… Title: {type(response.title).__name__}\")\n", - " print(f\" โœ… Key points: List with {len(response.key_points)} items\")\n", - " print(f\" โœ… Applications: List with {len(response.applications)} items\")\n", - " print(f\" โœ… Summary: {len(response.summary)} characters\")\n", - " else:\n", - " print(\"โŒ Failed to create structured output program\")\n", - "else:\n", - " print(\"โŒ No index available - run previous cells first\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿš€ Technique 4: Advanced RAG Pipeline (Combining All Techniques)\n", - "\n", - "**What this technique does:**\n", - "- Combines multiple advanced techniques into a single powerful query engine\n", - "- Similarity filtering **+** TreeSummarize response synthesis\n", - "- Best of both worlds: Clean results + comprehensive answers\n", - "\n", - "**Key Concept - Production RAG Systems:**\n", - "In real-world applications, you rarely use just one technique. 
Production RAG systems combine multiple techniques for optimal results.\n", - "\n", - "**How the Advanced Pipeline Works:**\n", - "```\n", - "User Query: \"Analyze AI agent architectures\"\n", - " โ†“\n", - "Step 1: Vector Search\n", - " โ†’ Retrieve top 10 chunks from vector database\n", - " โ†“\n", - "Step 2: Similarity Filtering (Postprocessor)\n", - " โ†’ Filter out chunks with score < 0.3\n", - " โ†’ Result: 5-7 high-quality chunks\n", - " โ†“\n", - "Step 3: TreeSummarize (Response Synthesizer)\n", - " โ†’ Build hierarchical summary of chunks\n", - " โ†’ Level 1: Pair-wise summaries\n", - " โ†’ Level 2: Combine into final answer\n", - " โ†“\n", - "Final Response: Comprehensive, relevant, well-synthesized answer\n", - "```\n", - "\n", - "**Benefits of Combining Techniques:**\n", - "1. **Similarity Filtering** removes noise โ†’ Cleaner input for LLM\n", - "2. **TreeSummarize** builds comprehensive answer โ†’ Better output quality\n", - "3. **Together** โ†’ High-quality results + comprehensive analysis\n", - "\n", - "**When to use this:**\n", - "- โœ… Production applications (where quality matters)\n", - "- โœ… Complex analytical queries\n", - "- โœ… When you need both precision and comprehensiveness\n", - "- โœ… API endpoints serving end users\n", - "\n", - "**When NOT to use this:**\n", - "- โŒ Simple factual queries (\"What is X?\") - basic RAG is fine\n", - "- โŒ Extremely cost-sensitive applications - more API calls\n", - "- โŒ Real-time systems needing <100ms response - adds latency" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def create_advanced_rag_pipeline(index, similarity_cutoff: float = 0.3, top_k: int = 10):\n", - " \"\"\"\n", - " Create a comprehensive advanced RAG pipeline combining multiple techniques.\n", - " \n", - " Args:\n", - " index: Vector index to query\n", - " similarity_cutoff: Minimum similarity score for filtering\n", - " top_k: Number of initial results to 
retrieve\n", - " \n", - " Returns:\n", - " Advanced query engine with filtering and synthesis combined\n", - " \"\"\"\n", - " # Create similarity postprocessor\n", - " similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)\n", - " \n", - " # Create TreeSummarize for comprehensive responses\n", - " tree_synthesizer = TreeSummarize()\n", - " \n", - " # Create the comprehensive query engine combining both techniques\n", - " advanced_engine = index.as_query_engine(\n", - " similarity_top_k=top_k,\n", - " node_postprocessors=[similarity_processor],\n", - " response_synthesizer=tree_synthesizer\n", - " )\n", - " \n", - " return advanced_engine\n", - "\n", - "# Test the comprehensive pipeline\n", - "if index:\n", - " print(\"๐Ÿš€ Creating advanced RAG pipeline...\")\n", - " print(\" Combining:\")\n", - " print(\" - Similarity filtering (remove noise)\")\n", - " print(\" - TreeSummarize (comprehensive synthesis)\")\n", - " \n", - " advanced_pipeline = create_advanced_rag_pipeline(index)\n", - " \n", - " if advanced_pipeline:\n", - " print(\"\\nโœ… Advanced RAG pipeline created successfully!\")\n", - " print(\" ๐Ÿ”ง Similarity filtering: โœ… (cutoff 0.3)\")\n", - " print(\" ๐ŸŒณ TreeSummarize synthesis: โœ…\")\n", - " \n", - " # Test with complex query\n", - " complex_query = \"Analyze the current state and future potential of AI agent technologies\"\n", - " print(f\"\\n๐Ÿ” Testing complex query: '{complex_query}'\")\n", - " print(\" (This combines both techniques for best results)\\n\")\n", - " \n", - " # Test the response\n", - " response = advanced_pipeline.query(complex_query)\n", - " print(f\"๐Ÿš€ Advanced RAG Response:\\n{response}\")\n", - " \n", - " print(\"\\n๐ŸŽฏ This response provides:\")\n", - " print(\" โœ… Filtered relevant results only (no noise)\")\n", - " print(\" โœ… Comprehensive analytical response (hierarchical synthesis)\")\n", - " print(\" โœ… Production-quality output\")\n", - " else:\n", - " print(\"โŒ Failed to create 
advanced RAG pipeline\")\n", - "else:\n", - " print(\"โŒ No index available - run previous cells first\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ†š Final Test: Compare Basic vs Advanced RAG\n", - "\n", - "**What this cell does:**\n", - "- Tests the same queries with **basic RAG** vs **advanced RAG**\n", - "- Shows you the quality improvements from advanced techniques\n", - "- Validates that all 5 components work correctly\n", - "\n", - "**Components to Test:**\n", - "1. โœ… Basic Index (foundation)\n", - "2. โœ… Similarity Filter (postprocessor)\n", - "3. โœ… TreeSummarize (response synthesizer)\n", - "4. โœ… Structured Output (Pydantic models)\n", - "5. โœ… Advanced Pipeline (combined techniques)\n", - "\n", - "**Test Queries:**\n", - "- Query 1: Key capabilities (factual)\n", - "- Query 2: Evaluation metrics (analytical)\n", - "- Query 3: Benefits and challenges (comparative)\n", - "\n", - "**What to look for:**\n", - "- Basic RAG: Functional answers\n", - "- Advanced RAG: More focused, comprehensive, better-synthesized answers\n", - "\n", - "**Expected differences:**\n", - "- Advanced responses should be more relevant (filtered)\n", - "- Advanced responses should be more comprehensive (TreeSummarize)\n", - "- Less irrelevant information in advanced responses" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Final comparison: Basic vs Advanced RAG\n", - "print(\"๐Ÿš€ Advanced RAG Techniques Assignment - Final Test\")\n", - "print(\"=\" * 60)\n", - "\n", - "# Test queries for comparison\n", - "test_queries = [\n", - " \"What are the key capabilities of AI agents?\",\n", - " \"How do you evaluate agent performance metrics?\",\n", - " \"Explain the benefits and challenges of multimodal AI systems\"\n", - "]\n", - "\n", - "# Check if all components were created\n", - "components_status = {\n", - " \"Basic Index\": index is not None,\n", - " 
\"Similarity Filter\": 'filtered_engine' in locals() and filtered_engine is not None,\n", - " \"TreeSummarize\": 'tree_engine' in locals() and tree_engine is not None,\n", - " \"Structured Output\": 'structured_program' in locals() and structured_program is not None,\n", - " \"Advanced Pipeline\": 'advanced_pipeline' in locals() and advanced_pipeline is not None\n", - "}\n", - "\n", - "print(\"\\n๐Ÿ“Š Component Status:\")\n", - "for component, status in components_status.items():\n", - " status_icon = \"โœ…\" if status else \"โŒ\"\n", - " print(f\" {status_icon} {component}\")\n", - "\n", - "# Create basic query engine for comparison\n", - "if index:\n", - " print(\"\\n๐Ÿ” Creating basic query engine for comparison...\")\n", - " basic_engine = index.as_query_engine(similarity_top_k=5)\n", - " \n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"๐Ÿ†š COMPARISON: Basic vs Advanced RAG\")\n", - " print(\"=\" * 60)\n", - " print(\"\\nโฑ๏ธ Note: This will make multiple OpenAI API calls (~$0.03-0.05 total)\")\n", - " \n", - " for i, query in enumerate(test_queries, 1):\n", - " print(f\"\\n๐Ÿ“‹ Test Query {i}: '{query}'\")\n", - " print(\"-\" * 50)\n", - " \n", - " # Basic RAG\n", - " print(\"๐Ÿ”น Basic RAG:\")\n", - " if basic_engine:\n", - " basic_response = basic_engine.query(query)\n", - " print(f\" {str(basic_response)[:200]}...\")\n", - " \n", - " # Advanced RAG (if implemented)\n", - " print(\"\\n๐Ÿ”ธ Advanced RAG:\")\n", - " if components_status[\"Advanced Pipeline\"]:\n", - " advanced_response = advanced_pipeline.query(query)\n", - " print(f\" {str(advanced_response)[:200]}...\")\n", - " else:\n", - " print(\" Complete the advanced pipeline function to test\")\n", - "\n", - "# Final status\n", - "print(\"\\n\" + \"=\" * 60)\n", - "print(\"๐ŸŽฏ Assignment Status:\")\n", - "completed_count = sum(components_status.values())\n", - "total_count = len(components_status)\n", - "\n", - "print(f\" Completed: {completed_count}/{total_count} components\")\n", - "\n", - 
"if completed_count == total_count:\n", - " print(\"\\n๐ŸŽ‰ Congratulations! You've mastered Advanced RAG Techniques!\")\n", - " print(\" โœ… Node postprocessors for result filtering\")\n", - " print(\" โœ… Response synthesizers for better answers\")\n", - " print(\" โœ… Structured outputs for reliable data\")\n", - " print(\" โœ… Advanced pipelines combining all techniques\")\n", - " print(\"\\n๐Ÿš€ You're ready for production RAG systems!\")\n", - " print(\"\\n๐Ÿ“š Key Takeaways:\")\n", - " print(\" โ€ข Postprocessors filter noise โ†’ Better input quality\")\n", - " print(\" โ€ข TreeSummarize builds comprehensive answers โ†’ Better output quality\")\n", - " print(\" โ€ข Structured outputs enable system integration โ†’ Production-ready\")\n", - " print(\" โ€ข Combining techniques โ†’ Professional RAG applications\")\n", - "else:\n", - " missing = total_count - completed_count\n", - " print(f\"\\n๐Ÿ“ {missing} component(s) need attention:\")\n", - " for component, status in components_status.items():\n", - " if not status:\n", - " print(f\" โŒ {component}\")\n", - "\n", - "print(\"\\n๐Ÿ’ก Advanced RAG vs Basic RAG:\")\n", - "print(\" Basic: Good for simple queries, fast responses\")\n", - "print(\" Advanced: Better quality, comprehensive answers, production-ready\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "bootcamp", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Asheesh_Ranjan_Srivastava/Day-6/assignment_3a_basic_gradio_rag_SUBMISSION.ipynb b/Asheesh_Ranjan_Srivastava/Day-6/assignment_3a_basic_gradio_rag_SUBMISSION.ipynb deleted file mode 100644 index 321120b..0000000 --- 
a/Asheesh_Ranjan_Srivastava/Day-6/assignment_3a_basic_gradio_rag_SUBMISSION.ipynb +++ /dev/null @@ -1,594 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assignment 3a: Basic Gradio RAG Frontend\n", - "## Day 6 Session 2 - Building Simple RAG Applications\n", - "\n", - "In this assignment, you'll build a simple Gradio frontend for your RAG system with just the essential features:\n", - "- Button to initialize the vector database\n", - "- Search query input and button\n", - "- Display of AI responses\n", - "\n", - "**Learning Objectives:**\n", - "- Create basic Gradio interfaces\n", - "- Connect RAG backend to frontend\n", - "- Handle user interactions and database initialization\n", - "- Build functional AI-powered web applications\n", - "\n", - "**Prerequisites:**\n", - "- Completed Assignment 1 (Vector Database Basics)\n", - "- Completed Assignment 2 (Advanced RAG)\n", - "- Understanding of LlamaIndex fundamentals" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ”‘ Setup: Configure Your API Key\n", - "\n", - "**This assignment uses OpenRouter** (cheaper alternative to OpenAI direct).\n", - "\n", - "### Get Your OpenRouter API Key:\n", - "1. Go to: https://openrouter.ai/keys\n", - "2. Sign up or log in (supports Google sign-in)\n", - "3. Create a new API key\n", - "4. Copy the key (starts with `sk-or-v1-...`)\n", - "\n", - "### Why OpenRouter?\n", - "- โœ… Access to multiple models (GPT-4, Claude, etc.)\n", - "- โœ… Often cheaper than direct OpenAI access\n", - "- โœ… Easy to compare models\n", - "- โœ… Good for learning\n", - "\n", - "### Cost Estimate:\n", - "- Using GPT-4o-mini via OpenRouter\n", - "- This assignment: ~5-10 queries = **$0.005 - $0.01 total**\n", - "- Very affordable!\n", - "\n", - "**Alternative:** You can also use OpenAI API key directly if you prefer." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# API Key Configuration\n", - "import os\n", - "from getpass import getpass\n", - "\n", - "# Check if API key is already set\n", - "if not os.getenv(\"OPENROUTER_API_KEY\") and not os.getenv(\"OPENAI_API_KEY\"):\n", - " print(\"\\n๐Ÿ”‘ API Key Configuration\")\n", - " print(\"=\" * 50)\n", - " print(\"This assignment needs an LLM API key.\\n\")\n", - " print(\"Option 1 (Recommended): OpenRouter API key\")\n", - " print(\" Get from: https://openrouter.ai/keys\")\n", - " print(\" Format: sk-or-v1-...\\n\")\n", - " print(\"Option 2: OpenAI API key\")\n", - " print(\" Get from: https://platform.openai.com/api-keys\")\n", - " print(\" Format: sk-proj-... or sk-...\\n\")\n", - " \n", - " api_key = getpass(\"Paste your API key: \").strip()\n", - " \n", - " if api_key:\n", - " if api_key.startswith(\"sk-or-\"):\n", - " os.environ[\"OPENROUTER_API_KEY\"] = api_key\n", - " print(\"\\nโœ… OpenRouter API key configured!\")\n", - " elif api_key.startswith(\"sk-\"):\n", - " os.environ[\"OPENAI_API_KEY\"] = api_key\n", - " print(\"\\nโœ… OpenAI API key configured!\")\n", - " else:\n", - " print(\"\\nโš ๏ธ Warning: API key format not recognized. Setting as OpenRouter key.\")\n", - " os.environ[\"OPENROUTER_API_KEY\"] = api_key\n", - " else:\n", - " print(\"\\nโš ๏ธ No API key entered. 
Please run this cell again.\")\n", - "else:\n", - " print(\"โœ… API key already configured!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“š Part 1: Setup and Imports\n", - "\n", - "**What this does:**\n", - "- Imports Gradio for building the web UI\n", - "- Imports LlamaIndex components (same as Assignments 1 & 2)\n", - "- Imports OpenRouter for LLM operations\n", - "\n", - "**New Library - Gradio:**\n", - "- Python library for building ML/AI web interfaces\n", - "- Simple API (just Python, no HTML/CSS/JS needed)\n", - "- Automatic UI generation from function signatures\n", - "- Built-in hosting (can share publicly)\n", - "\n", - "**Why Gradio?**\n", - "- โœ… Fast prototyping (minutes, not hours)\n", - "- โœ… No frontend coding required\n", - "- โœ… Works in Jupyter notebooks\n", - "- โœ… Easy to share demos" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Import required libraries\n", - "import gradio as gr\n", - "import os\n", - "from pathlib import Path\n", - "\n", - "# LlamaIndex components\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "from llama_index.llms.openrouter import OpenRouter\n", - "\n", - "print(\"โœ… All libraries imported successfully!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿค– Part 2: RAG Backend Class\n", - "\n", - "**What this class does:**\n", - "- Wraps your RAG system in a reusable class\n", - "- Handles database initialization\n", - "- Processes user queries\n", - "- Manages errors gracefully\n", - "\n", - "**Key Methods:**\n", - "1. `__init__()`: Initialize with settings\n", - "2. `setup_settings()`: Configure LlamaIndex (LLM + embeddings)\n", - "3. 
`initialize_database()`: Load documents, create vector index\n", - "4. `query()`: Answer user questions using RAG\n", - "\n", - "**Design Pattern - Backend/Frontend Separation:**\n", - "- **Backend** (this class): Business logic, data processing\n", - "- **Frontend** (next cell): User interface, interactions\n", - "- **Benefit**: Can swap UI (Gradio โ†’ Streamlit โ†’ Flask) without changing backend\n", - "\n", - "**Error Handling:**\n", - "- Returns user-friendly messages instead of crashing\n", - "- Checks for common issues (missing data, no index, empty query)\n", - "- Uses try/except for robustness" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class SimpleRAGBackend:\n", - " \"\"\"Simple RAG backend for Gradio frontend.\"\"\"\n", - " \n", - " def __init__(self):\n", - " self.index = None\n", - " self.setup_settings()\n", - " \n", - " def setup_settings(self):\n", - " \"\"\"Configure LlamaIndex settings.\"\"\"\n", - " # Try OpenRouter first, fall back to OpenAI\n", - " openrouter_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " openai_key = os.getenv(\"OPENAI_API_KEY\")\n", - " \n", - " if openrouter_key:\n", - " from llama_index.llms.openrouter import OpenRouter\n", - " Settings.llm = OpenRouter(\n", - " api_key=openrouter_key,\n", - " model=\"openai/gpt-4o-mini\", # Via OpenRouter\n", - " temperature=0.1\n", - " )\n", - " print(\"โœ… Using OpenRouter for LLM (gpt-4o-mini)\")\n", - " elif openai_key:\n", - " from llama_index.llms.openai import OpenAI\n", - " Settings.llm = OpenAI(\n", - " api_key=openai_key,\n", - " model=\"gpt-4o-mini\",\n", - " temperature=0.1\n", - " )\n", - " print(\"โœ… Using OpenAI for LLM (gpt-4o-mini)\")\n", - " else:\n", - " print(\"โš ๏ธ No API key found - LLM operations will fail\")\n", - " \n", - " # Set up the embedding model (local, free)\n", - " Settings.embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\",\n", - " 
trust_remote_code=True\n", - " )\n", - " \n", - " # Set chunking parameters\n", - " Settings.chunk_size = 512\n", - " Settings.chunk_overlap = 50\n", - " print(\"โœ… Local embeddings configured (BAAI/bge-small-en-v1.5)\")\n", - " \n", - " def initialize_database(self, data_folder=\"data\"):\n", - " \"\"\"Initialize the vector database with documents.\"\"\"\n", - " # Check if data folder exists\n", - " if not Path(data_folder).exists():\n", - " return f\"โŒ Data folder '{data_folder}' not found! Please check the path.\"\n", - " \n", - " try:\n", - " # Create vector store\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./basic_rag_vectordb\",\n", - " table_name=\"documents\"\n", - " )\n", - " \n", - " # Load documents\n", - " reader = SimpleDirectoryReader(input_dir=data_folder, recursive=True)\n", - " documents = reader.load_data()\n", - " \n", - " if len(documents) == 0:\n", - " return f\"โŒ No documents found in '{data_folder}'!\"\n", - " \n", - " # Create storage context and index\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " self.index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context,\n", - " show_progress=True\n", - " )\n", - " \n", - " return f\"โœ… Database initialized successfully with {len(documents)} documents!\"\n", - " \n", - " except Exception as e:\n", - " return f\"โŒ Error initializing database: {str(e)}\"\n", - " \n", - " def query(self, question):\n", - " \"\"\"Query the RAG system and return response.\"\"\"\n", - " # Check if index exists\n", - " if self.index is None:\n", - " return \"โŒ Please initialize the database first! 
Click the 'Initialize Database' button above.\"\n", - " \n", - " # Check if question is empty\n", - " if not question or not question.strip():\n", - " return \"โš ๏ธ Please enter a question first!\"\n", - " \n", - " try:\n", - " # Create query engine and get response\n", - " query_engine = self.index.as_query_engine()\n", - " response = query_engine.query(question)\n", - " return str(response)\n", - " \n", - " except Exception as e:\n", - " return f\"โŒ Error processing query: {str(e)}\"\n", - "\n", - "# Initialize the backend\n", - "rag_backend = SimpleRAGBackend()\n", - "print(\"\\n๐Ÿš€ RAG Backend initialized and ready!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐ŸŽจ Part 3: Gradio Interface (TODO - Complete This Section)\n", - "\n", - "**What you'll build:**\n", - "A simple web interface with these components:\n", - "1. **Title** - Using `gr.Markdown()`\n", - "2. **Initialize Button** - Using `gr.Button()`\n", - "3. **Status Output** - Using `gr.Textbox()`\n", - "4. **Query Input** - Using `gr.Textbox()`\n", - "5. **Submit Button** - Using `gr.Button()`\n", - "6. 
**Response Output** - Using `gr.Textbox()`\n", - "\n", - "**Gradio Basics:**\n", - "\n", - "### Creating a Layout:\n", - "```python\n", - "with gr.Blocks() as interface:\n", - " # Add components here\n", - " title = gr.Markdown(\"# My App\")\n", - " button = gr.Button(\"Click Me\")\n", - "```\n", - "\n", - "### Component Types:\n", - "```python\n", - "# Display text/markdown\n", - "gr.Markdown(\"# Title\")\n", - "\n", - "# Button\n", - "btn = gr.Button(\"Button Label\")\n", - "\n", - "# Text input\n", - "input_box = gr.Textbox(label=\"Enter text\", placeholder=\"Type here...\")\n", - "\n", - "# Text output (read-only)\n", - "output_box = gr.Textbox(label=\"Output\", interactive=False)\n", - "```\n", - "\n", - "### Connecting Components:\n", - "```python\n", - "# Connect button to function\n", - "btn.click(\n", - " fn=my_function, # Function to call\n", - " inputs=[input_box], # What to pass as input\n", - " outputs=[output_box] # Where to put the result\n", - ")\n", - "```\n", - "\n", - "**Your Task:**\n", - "Complete the TODOs below to create the interface!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def create_basic_rag_interface():\n", - " \"\"\"Create basic RAG interface with essential features.\"\"\"\n", - " \n", - " def initialize_db():\n", - " \"\"\"Handle database initialization.\"\"\"\n", - " return rag_backend.initialize_database()\n", - " \n", - " def handle_query(question):\n", - " \"\"\"Handle user queries.\"\"\"\n", - " return rag_backend.query(question)\n", - " \n", - " # Create Gradio interface using gr.Blocks()\n", - " with gr.Blocks(title=\"Basic RAG Assistant\") as interface:\n", - " \n", - " # TODO 1: Add title and description\n", - " # Hint: Use gr.Markdown() for formatted text\n", - " # Example: gr.Markdown(\"# My Title\")\n", - " gr.Markdown(\"# ๐Ÿค– Basic RAG Assistant\")\n", - " gr.Markdown(\"Ask questions about your documents using AI-powered search!\")\n", - " \n", - " # Add some space\n", - " gr.Markdown(\"---\")\n", - " \n", - " # TODO 2: Add initialization section\n", - " # Hint: Create a button with gr.Button(\"Button Text\")\n", - " gr.Markdown(\"### Step 1: Initialize Database\")\n", - " gr.Markdown(\"Click the button below to load documents and create the vector database.\")\n", - " init_btn = gr.Button(\"๐Ÿš€ Initialize Database\", variant=\"primary\")\n", - " \n", - " # TODO 3: Add status output\n", - " # Hint: Use gr.Textbox(label=\"Status\", interactive=False) for read-only output\n", - " status_output = gr.Textbox(\n", - " label=\"Status\",\n", - " placeholder=\"Click 'Initialize Database' to start...\",\n", - " interactive=False,\n", - " lines=2\n", - " )\n", - " \n", - " # Add some space\n", - " gr.Markdown(\"---\")\n", - " \n", - " # TODO 4: Add query section\n", - " # Hint: You need:\n", - " # - gr.Textbox() for input (with placeholder)\n", - " # - gr.Button() for submit\n", - " # - gr.Textbox() for response output (interactive=False)\n", - " \n", - " gr.Markdown(\"### Step 2: Ask Questions\")\n", - " 
gr.Markdown(\"Enter your question below and click 'Ask Question' to get an AI-powered answer.\")\n", - " \n", - " query_input = gr.Textbox(\n", - " label=\"Your Question\",\n", - " placeholder=\"What would you like to know about the documents?\",\n", - " lines=2\n", - " )\n", - " \n", - " submit_btn = gr.Button(\"๐Ÿ’ฌ Ask Question\", variant=\"primary\")\n", - " \n", - " response_output = gr.Textbox(\n", - " label=\"AI Response\",\n", - " placeholder=\"Response will appear here...\",\n", - " interactive=False,\n", - " lines=10\n", - " )\n", - " \n", - " # TODO 5: Connect buttons to functions\n", - " # Hint: Use button.click(function, inputs=[...], outputs=[...])\n", - " # Example: btn.click(fn=my_func, inputs=[input_box], outputs=[output_box])\n", - " \n", - " # Connect initialize button\n", - " init_btn.click(\n", - " fn=initialize_db,\n", - " inputs=None, # No inputs needed\n", - " outputs=[status_output]\n", - " )\n", - " \n", - " # Connect submit button\n", - " submit_btn.click(\n", - " fn=handle_query,\n", - " inputs=[query_input],\n", - " outputs=[response_output]\n", - " )\n", - " \n", - " return interface\n", - "\n", - "# Create the interface\n", - "print(\"๐ŸŽจ Creating Gradio interface...\")\n", - "basic_interface = create_basic_rag_interface()\n", - "print(\"โœ… Basic RAG interface created successfully!\")\n", - "print(\"\\n๐Ÿ’ก Run the next cell to launch the app!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿš€ Part 4: Launch Your Application\n", - "\n", - "**What this does:**\n", - "- Starts a local web server\n", - "- Opens your app in a browser\n", - "- Makes your RAG system accessible via web UI\n", - "\n", - "**Launch Options:**\n", - "```python\n", - "# Basic launch (default: localhost:7860)\n", - "interface.launch()\n", - "\n", - "# Custom port\n", - "interface.launch(server_port=8080)\n", - "\n", - "# Public URL (shareable link - 72 hours)\n", - "interface.launch(share=True)\n", - "\n", - "# 
Inline in Jupyter\n", - "interface.launch(inline=True)\n", - "```\n", - "\n", - "**Testing Instructions:**\n", - "1. **Initialize Database** - Click the button and wait for success message (~30-60 seconds)\n", - "2. **Ask Questions** - Try the example questions below\n", - "3. **Experiment** - Try different queries to test semantic search\n", - "\n", - "**Example Questions:**\n", - "- \"What are the main topics in the documents?\"\n", - "- \"Summarize the key findings\"\n", - "- \"What are AI agents and how do they work?\"\n", - "- \"Explain the methodology used in the research\"\n", - "\n", - "**Troubleshooting:**\n", - "- **Database init fails**: Check that `data` folder exists with documents\n", - "- **Query fails**: Make sure database was initialized first\n", - "- **Slow responses**: Normal for first query (model loading), faster after" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"๐ŸŽ‰ Launching your Basic RAG Assistant...\")\n", - "print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n", - "print(\"\")\n", - "print(\"๐Ÿ“‹ Testing Instructions:\")\n", - "print(\"1. Click 'Initialize Database' button first\")\n", - "print(\"2. Wait for success message (~30-60 seconds)\")\n", - "print(\"3. Enter a question in the query box\")\n", - "print(\"4. 
Click 'Ask Question' to get AI response\")\n", - "print(\"\")\n", - "print(\"๐Ÿ’ก Example questions to try:\")\n", - "print(\"- What are the main topics in the documents?\")\n", - "print(\"- Summarize the key findings\")\n", - "print(\"- What are AI agents and how do they work?\")\n", - "print(\"- Explain the methodology used\")\n", - "print(\"\")\n", - "print(\"๐Ÿš€ Launching app...\")\n", - "print(\"\")\n", - "\n", - "# Launch the application\n", - "# Default: Opens at http://localhost:7860\n", - "basic_interface.launch(\n", - " server_port=7860, # Default Gradio port\n", - " share=False, # Set to True for public URL (expires in 72 hours)\n", - " inline=False # Set to True to display inline in Jupyter\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## โœ… Assignment Completion Checklist\n", - "\n", - "Before submitting, ensure you have:\n", - "\n", - "### Implementation:\n", - "- [x] RAG backend is provided and working\n", - "- [x] Created Gradio interface with required components:\n", - " - [x] Title and description using `gr.Markdown()`\n", - " - [x] Initialize database button using `gr.Button()`\n", - " - [x] Status output using `gr.Textbox()`\n", - " - [x] Query input field using `gr.Textbox()`\n", - " - [x] Submit query button using `gr.Button()`\n", - " - [x] Response output area using `gr.Textbox()`\n", - "- [x] Connected buttons to backend functions using `.click()`\n", - "- [x] Successfully launched the application\n", - "\n", - "### Testing:\n", - "- [ ] Tested database initialization (should show success message)\n", - "- [ ] Tested query functionality (should return AI responses)\n", - "- [ ] Tested error handling (try querying before initialization)\n", - "- [ ] Tested with multiple different questions\n", - "\n", - "### Understanding:\n", - "- [ ] Understand how Gradio components work\n", - "- [ ] Understand how to connect UI to backend functions\n", - "- [ ] Understand the RAG query flow (database โ†’ 
retrieval โ†’ LLM โ†’ response)\n", - "\n", - "---\n", - "\n", - "## ๐ŸŽŠ Congratulations!\n", - "\n", - "You've successfully built your first Gradio RAG application! You now have:\n", - "\n", - "โœ… **A functional web interface** for your RAG system\n", - "โœ… **Understanding of Gradio basics** and component connections\n", - "โœ… **A foundation** for building more complex AI applications\n", - "โœ… **Hands-on experience** with frontend-backend integration\n", - "\n", - "### What You Learned:\n", - "- Creating web UIs with Gradio (no HTML/CSS/JS needed!)\n", - "- Connecting UI components to Python functions\n", - "- Building interactive AI applications\n", - "- Handling user interactions and errors\n", - "- Deploying RAG systems with web interfaces\n", - "\n", - "### Next Steps:\n", - "**Complete Assignment 3b** to add advanced configuration options:\n", - "- API key input in the UI\n", - "- Adjustable similarity threshold\n", - "- Different response modes\n", - "- Model selection\n", - "\n", - "---\n", - "\n", - "## ๐Ÿ“š Additional Resources:\n", - "- Gradio Docs: https://www.gradio.app/docs\n", - "- LlamaIndex Docs: https://docs.llamaindex.ai/\n", - "- OpenRouter: https://openrouter.ai/docs" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "bootcamp", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Asheesh_Ranjan_Srivastava/Day-6/assignment_3b_advanced_gradio_rag_SUBMISSION.ipynb b/Asheesh_Ranjan_Srivastava/Day-6/assignment_3b_advanced_gradio_rag_SUBMISSION.ipynb deleted file mode 100644 index a2cbca7..0000000 --- a/Asheesh_Ranjan_Srivastava/Day-6/assignment_3b_advanced_gradio_rag_SUBMISSION.ipynb +++ /dev/null @@ -1,955 +0,0 @@ -{ - 
"cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assignment 3b: Advanced Gradio RAG Frontend\n", - "## Day 6 Session 2 - Building Configurable RAG Applications\n", - "\n", - "In this assignment, you'll extend your basic RAG interface with advanced configuration options to create a professional, feature-rich RAG application.\n", - "\n", - "**New Features to Add:**\n", - "- Model selection dropdown (gpt-4o, gpt-4o-mini)\n", - "- Temperature slider (0 to 1 with 0.1 intervals)\n", - "- Chunk size configuration\n", - "- Chunk overlap configuration \n", - "- Similarity top-k slider\n", - "- Node postprocessor multiselect\n", - "- Similarity cutoff slider\n", - "- Response synthesizer multiselect\n", - "\n", - "**Learning Objectives:**\n", - "- Advanced Gradio components and interactions\n", - "- Dynamic RAG configuration\n", - "- Professional UI design patterns\n", - "- Parameter validation and handling\n", - "- Building production-ready AI applications\n", - "\n", - "**Prerequisites:**\n", - "- Completed Assignment 3a (Basic Gradio RAG)\n", - "- Understanding of RAG parameters and their effects" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ”‘ Setup: Configure Your API Key\n", - "\n", - "**This assignment uses OpenRouter** (cheaper alternative to OpenAI direct).\n", - "\n", - "### Get Your OpenRouter API Key:\n", - "1. Go to: https://openrouter.ai/keys\n", - "2. Sign up or log in (supports Google sign-in)\n", - "3. Create a new API key\n", - "4. 
Copy the key (starts with `sk-or-v1-...`)\n", - "\n", - "### Why OpenRouter?\n", - "- โœ… Access to multiple models (GPT-4, Claude, Gemini, etc.)\n", - "- โœ… Often cheaper than direct OpenAI access\n", - "- โœ… Easy to compare models\n", - "- โœ… Good for learning and experimentation\n", - "\n", - "### Cost Estimate:\n", - "- Using GPT-4o-mini via OpenRouter\n", - "- This assignment: ~10-15 queries with different configs = **$0.01 - $0.02 total**\n", - "- Very affordable!\n", - "\n", - "**Alternative:** You can also use OpenAI API key directly if you prefer." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# API Key Configuration\n", - "import os\n", - "from getpass import getpass\n", - "\n", - "# Check if API key is already set\n", - "if not os.getenv(\"OPENROUTER_API_KEY\") and not os.getenv(\"OPENAI_API_KEY\"):\n", - " print(\"\\n๐Ÿ”‘ API Key Configuration\")\n", - " print(\"=\" * 50)\n", - " print(\"This assignment needs an LLM API key.\\n\")\n", - " print(\"Option 1 (Recommended): OpenRouter API key\")\n", - " print(\" Get from: https://openrouter.ai/keys\")\n", - " print(\" Format: sk-or-v1-...\")\n", - " print(\" Benefit: Access to multiple models, often cheaper\\n\")\n", - " print(\"Option 2: OpenAI API key\")\n", - " print(\" Get from: https://platform.openai.com/api-keys\")\n", - " print(\" Format: sk-proj-... or sk-...\\n\")\n", - " \n", - " api_key = getpass(\"Paste your API key: \").strip()\n", - " \n", - " if api_key:\n", - " if api_key.startswith(\"sk-or-\"):\n", - " os.environ[\"OPENROUTER_API_KEY\"] = api_key\n", - " print(\"\\nโœ… OpenRouter API key configured!\")\n", - " elif api_key.startswith(\"sk-\"):\n", - " os.environ[\"OPENAI_API_KEY\"] = api_key\n", - " print(\"\\nโœ… OpenAI API key configured!\")\n", - " else:\n", - " print(\"\\nโš ๏ธ Warning: API key format not recognized. 
Setting as OpenRouter key.\")\n", - " os.environ[\"OPENROUTER_API_KEY\"] = api_key\n", - " else:\n", - " print(\"\\nโš ๏ธ No API key entered. Please run this cell again.\")\n", - "else:\n", - " if os.getenv(\"OPENROUTER_API_KEY\"):\n", - " print(\"โœ… OpenRouter API key already configured!\")\n", - " else:\n", - " print(\"โœ… OpenAI API key already configured!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ“š Part 1: Setup and Imports\n", - "\n", - "**What's new vs Assignment 3a:**\n", - "- Advanced RAG components (postprocessors, synthesizers)\n", - "- More sophisticated configuration handling\n", - "\n", - "**Libraries:**\n", - "- **Gradio**: Web UI framework\n", - "- **LlamaIndex Core**: Basic RAG components\n", - "- **LlamaIndex Advanced**: Postprocessors and response synthesizers\n", - "- **OpenRouter**: LLM access (multi-model support)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Import all required libraries\n", - "import gradio as gr\n", - "import os\n", - "from pathlib import Path\n", - "from typing import Dict, List, Optional, Any\n", - "\n", - "# LlamaIndex core components\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "from llama_index.llms.openrouter import OpenRouter\n", - "\n", - "# Advanced RAG components\n", - "from llama_index.core.postprocessor import SimilarityPostprocessor\n", - "from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n", - "from llama_index.core.retrievers import VectorIndexRetriever\n", - "\n", - "print(\"โœ… All libraries imported successfully!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿค– Part 2: Advanced RAG 
Backend Class\n", - "\n", - "**What this class does:**\n", - "- Supports dynamic configuration of ALL RAG parameters\n", - "- Handles multiple postprocessors and synthesizers\n", - "- Returns detailed results including sources and config used\n", - "\n", - "**Key Methods:**\n", - "1. `update_settings()` - Dynamically update LLM, temperature, chunking\n", - "2. `initialize_database()` - Load documents and create vector index\n", - "3. `get_postprocessor()` - Create configured postprocessor\n", - "4. `get_synthesizer()` - Create configured response synthesizer\n", - "5. `advanced_query()` - Process queries with full configuration\n", - "\n", - "**Configuration Options:**\n", - "- **Model**: Which LLM to use (gpt-4o, gpt-4o-mini, etc.)\n", - "- **Temperature**: Randomness of responses (0.0-1.0)\n", - "- **Chunk Size**: How much text per chunk (256-1024)\n", - "- **Chunk Overlap**: Context preserved between chunks (10-100)\n", - "- **Similarity Top-K**: How many chunks to retrieve (1-20)\n", - "- **Postprocessors**: Filters for retrieved chunks\n", - "- **Similarity Cutoff**: Minimum score for postprocessor (0.0-1.0)\n", - "- **Response Synthesizer**: How to combine chunks into answer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class AdvancedRAGBackend:\n", - " \"\"\"Advanced RAG backend with configurable parameters.\"\"\"\n", - " \n", - " def __init__(self):\n", - " self.index = None\n", - " self.available_models = [\"openai/gpt-4o\", \"openai/gpt-4o-mini\"]\n", - " self.available_postprocessors = [\"SimilarityPostprocessor\", \"None\"]\n", - " self.available_synthesizers = [\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"]\n", - " self.update_settings()\n", - " \n", - " def update_settings(self, model: str = \"openai/gpt-4o-mini\", temperature: float = 0.1, \n", - " chunk_size: int = 512, chunk_overlap: int = 50):\n", - " \"\"\"Update LlamaIndex settings based on user 
configuration.\"\"\"\n", - " # Try OpenRouter first, fall back to OpenAI\n", - " openrouter_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " openai_key = os.getenv(\"OPENAI_API_KEY\")\n", - " \n", - " if openrouter_key:\n", - " Settings.llm = OpenRouter(\n", - " api_key=openrouter_key,\n", - " model=model,\n", - " temperature=temperature\n", - " )\n", - " elif openai_key:\n", - " from llama_index.llms.openai import OpenAI\n", - " # Extract model name (remove \"openai/\" prefix if present)\n", - " model_name = model.replace(\"openai/\", \"\")\n", - " Settings.llm = OpenAI(\n", - " api_key=openai_key,\n", - " model=model_name,\n", - " temperature=temperature\n", - " )\n", - " \n", - " # Set up the embedding model (keep this constant - local and free)\n", - " Settings.embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\",\n", - " trust_remote_code=True\n", - " )\n", - " \n", - " # Set chunking parameters from function parameters\n", - " Settings.chunk_size = chunk_size\n", - " Settings.chunk_overlap = chunk_overlap\n", - " \n", - " def initialize_database(self, data_folder=\"data\"):\n", - " \"\"\"Initialize the vector database with documents.\"\"\"\n", - " if not Path(data_folder).exists():\n", - " return f\"โŒ Data folder '{data_folder}' not found! 
Please check the path.\"\n", - " \n", - " try:\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./advanced_rag_vectordb\",\n", - " table_name=\"documents\"\n", - " )\n", - " \n", - " reader = SimpleDirectoryReader(input_dir=data_folder, recursive=True)\n", - " documents = reader.load_data()\n", - " \n", - " if len(documents) == 0:\n", - " return f\"โŒ No documents found in '{data_folder}'!\"\n", - " \n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " self.index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context,\n", - " show_progress=True\n", - " )\n", - " \n", - " return f\"โœ… Database initialized successfully with {len(documents)} documents!\"\n", - " \n", - " except Exception as e:\n", - " return f\"โŒ Error initializing database: {str(e)}\"\n", - " \n", - " def get_postprocessor(self, postprocessor_name: str, similarity_cutoff: float):\n", - " \"\"\"Get the selected postprocessor.\"\"\"\n", - " if postprocessor_name == \"SimilarityPostprocessor\":\n", - " return SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)\n", - " return None\n", - " \n", - " def get_synthesizer(self, synthesizer_name: str):\n", - " \"\"\"Get the selected response synthesizer.\"\"\"\n", - " if synthesizer_name == \"TreeSummarize\":\n", - " return TreeSummarize()\n", - " elif synthesizer_name == \"Refine\":\n", - " return Refine()\n", - " elif synthesizer_name == \"CompactAndRefine\":\n", - " return CompactAndRefine()\n", - " return None # Default synthesizer\n", - " \n", - " def advanced_query(self, question: str, model: str, temperature: float, \n", - " chunk_size: int, chunk_overlap: int, similarity_top_k: int,\n", - " postprocessor_names: List[str], similarity_cutoff: float,\n", - " synthesizer_name: str) -> Dict[str, Any]:\n", - " \"\"\"Query the RAG system with advanced configuration.\"\"\"\n", - " \n", - " if self.index is None:\n", - " return {\"response\": \"โŒ Please 
initialize the database first!\", \"sources\": [], \"config\": {}}\n", - " \n", - " if not question or not question.strip():\n", - " return {\"response\": \"โš ๏ธ Please enter a question first!\", \"sources\": [], \"config\": {}}\n", - " \n", - " try:\n", - " # Update settings with new parameters\n", - " self.update_settings(model, temperature, chunk_size, chunk_overlap)\n", - " \n", - " # Get postprocessors\n", - " postprocessors = []\n", - " for name in postprocessor_names:\n", - " processor = self.get_postprocessor(name, similarity_cutoff)\n", - " if processor is not None:\n", - " postprocessors.append(processor)\n", - " \n", - " # Get synthesizer\n", - " synthesizer = self.get_synthesizer(synthesizer_name)\n", - " \n", - " # Create query engine with all parameters\n", - " query_engine_kwargs = {\"similarity_top_k\": similarity_top_k}\n", - " if postprocessors:\n", - " query_engine_kwargs[\"node_postprocessors\"] = postprocessors\n", - " if synthesizer is not None:\n", - " query_engine_kwargs[\"response_synthesizer\"] = synthesizer\n", - " \n", - " query_engine = self.index.as_query_engine(**query_engine_kwargs)\n", - " \n", - " # Query and get response\n", - " response = query_engine.query(question)\n", - " \n", - " # Extract source information if available\n", - " sources = []\n", - " if hasattr(response, 'source_nodes'):\n", - " for node in response.source_nodes:\n", - " sources.append({\n", - " \"text\": node.text[:200] + \"...\",\n", - " \"score\": getattr(node, 'score', 0.0),\n", - " \"source\": getattr(node.node, 'metadata', {}).get('file_name', 'Unknown')\n", - " })\n", - " \n", - " return {\n", - " \"response\": str(response),\n", - " \"sources\": sources,\n", - " \"config\": {\n", - " \"model\": model,\n", - " \"temperature\": temperature,\n", - " \"chunk_size\": chunk_size,\n", - " \"chunk_overlap\": chunk_overlap,\n", - " \"similarity_top_k\": similarity_top_k,\n", - " \"postprocessors\": postprocessor_names,\n", - " \"similarity_cutoff\": 
similarity_cutoff,\n", - " \"synthesizer\": synthesizer_name\n", - " }\n", - " }\n", - " \n", - " except Exception as e:\n", - " return {\"response\": f\"โŒ Error processing query: {str(e)}\", \"sources\": [], \"config\": {}}\n", - "\n", - "# Initialize the backend\n", - "print(\"๐Ÿš€ Initializing Advanced RAG Backend...\")\n", - "rag_backend = AdvancedRAGBackend()\n", - "print(\"โœ… Advanced RAG Backend initialized and ready!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐ŸŽจ Part 3: Advanced Gradio Interface\n", - "\n", - "**What you'll build:**\n", - "A sophisticated 2-column layout:\n", - "- **Left Column**: All configuration controls\n", - "- **Right Column**: Query interface and responses\n", - "\n", - "**Components Needed:**\n", - "\n", - "### Configuration Controls (Left):\n", - "1. **Model Dropdown** - `gr.Dropdown(choices=[...], value=\"...\")`\n", - "2. **Temperature Slider** - `gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.1)`\n", - "3. **Chunk Size Number** - `gr.Number(value=512, minimum=128, maximum=2048)`\n", - "4. **Chunk Overlap Number** - `gr.Number(value=50, minimum=0, maximum=200)`\n", - "5. **Similarity Top-K Slider** - `gr.Slider(minimum=1, maximum=20, step=1, value=5)`\n", - "6. **Postprocessor Checkbox** - `gr.CheckboxGroup(choices=[...], value=[...])`\n", - "7. **Similarity Cutoff Slider** - `gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.3)`\n", - "8. **Synthesizer Dropdown** - `gr.Dropdown(choices=[...], value=\"Default\")`\n", - "\n", - "### Query Interface (Right):\n", - "1. **Query Input** - `gr.Textbox(lines=3, placeholder=\"...\")`\n", - "2. **Submit Button** - `gr.Button(variant=\"primary\")`\n", - "3. **Response Output** - `gr.Textbox(lines=12, interactive=False)`\n", - "4. 
**Config Display** - `gr.Textbox(lines=8, interactive=False)`\n", - "\n", - "**Layout Pattern:**\n", - "```python\n", - "with gr.Blocks() as interface:\n", - " # Title\n", - " with gr.Row():\n", - " with gr.Column(scale=1): # Left - Config\n", - " # Configuration controls\n", - " with gr.Column(scale=2): # Right - Query\n", - " # Query interface\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def create_advanced_rag_interface():\n", - " \"\"\"Create advanced RAG interface with full configuration options.\"\"\"\n", - " \n", - " def initialize_db():\n", - " \"\"\"Handle database initialization.\"\"\"\n", - " return rag_backend.initialize_database()\n", - " \n", - " def handle_advanced_query(question, model, temperature, chunk_size, chunk_overlap, \n", - " similarity_top_k, postprocessors, similarity_cutoff, synthesizer):\n", - " \"\"\"Handle advanced RAG queries with all configuration options.\"\"\"\n", - " result = rag_backend.advanced_query(\n", - " question, model, temperature, chunk_size, chunk_overlap,\n", - " similarity_top_k, postprocessors, similarity_cutoff, synthesizer\n", - " )\n", - " \n", - " # Format configuration for display\n", - " config_text = f\"\"\"**Current Configuration:**\n", - "- Model: {result['config'].get('model', 'N/A')}\n", - "- Temperature: {result['config'].get('temperature', 'N/A')}\n", - "- Chunk Size: {result['config'].get('chunk_size', 'N/A')}\n", - "- Chunk Overlap: {result['config'].get('chunk_overlap', 'N/A')}\n", - "- Similarity Top-K: {result['config'].get('similarity_top_k', 'N/A')}\n", - "- Postprocessors: {', '.join(result['config'].get('postprocessors', []))}\n", - "- Similarity Cutoff: {result['config'].get('similarity_cutoff', 'N/A')}\n", - "- Synthesizer: {result['config'].get('synthesizer', 'N/A')}\"\"\"\n", - " \n", - " return result[\"response\"], config_text\n", - " \n", - " # Create the advanced interface structure\n", - " with 
gr.Blocks(title=\"Advanced RAG Assistant\") as interface:\n", - " # Title and description\n", - " gr.Markdown(\"# ๐Ÿค– Advanced RAG Assistant\")\n", - " gr.Markdown(\"Configure all RAG parameters and experiment with different settings!\")\n", - " gr.Markdown(\"---\")\n", - " \n", - " # Database initialization section\n", - " gr.Markdown(\"### ๐Ÿš€ Step 1: Initialize Database\")\n", - " init_btn = gr.Button(\"Initialize Vector Database\", variant=\"primary\", size=\"lg\")\n", - " status_output = gr.Textbox(\n", - " label=\"Status\",\n", - " placeholder=\"Click 'Initialize Vector Database' to start...\",\n", - " interactive=False,\n", - " lines=2\n", - " )\n", - " \n", - " gr.Markdown(\"---\")\n", - " gr.Markdown(\"### ๐Ÿ’ฌ Step 2: Configure & Query\")\n", - " \n", - " # Main layout with columns\n", - " with gr.Row():\n", - " # Left column: Configuration controls\n", - " with gr.Column(scale=1):\n", - " gr.Markdown(\"#### โš™๏ธ RAG Configuration\")\n", - " \n", - " # Model selection\n", - " model_dropdown = gr.Dropdown(\n", - " choices=[\"openai/gpt-4o\", \"openai/gpt-4o-mini\"],\n", - " value=\"openai/gpt-4o-mini\",\n", - " label=\"Model\",\n", - " info=\"Choose LLM model (gpt-4o-mini is faster & cheaper)\"\n", - " )\n", - " \n", - " # Temperature control\n", - " temperature_slider = gr.Slider(\n", - " minimum=0.0,\n", - " maximum=1.0,\n", - " step=0.1,\n", - " value=0.1,\n", - " label=\"Temperature\",\n", - " info=\"0.0 = deterministic, 1.0 = creative\"\n", - " )\n", - " \n", - " gr.Markdown(\"**Chunking Parameters:**\")\n", - " \n", - " # Chunk size\n", - " chunk_size_input = gr.Number(\n", - " value=512,\n", - " minimum=128,\n", - " maximum=2048,\n", - " label=\"Chunk Size\",\n", - " info=\"Characters per chunk (default: 512)\"\n", - " )\n", - " \n", - " # Chunk overlap\n", - " chunk_overlap_input = gr.Number(\n", - " value=50,\n", - " minimum=0,\n", - " maximum=200,\n", - " label=\"Chunk Overlap\",\n", - " info=\"Overlap between chunks (default: 50)\"\n", - " 
)\n", - " \n", - " gr.Markdown(\"**Retrieval Parameters:**\")\n", - " \n", - " # Similarity top-k\n", - " similarity_topk_slider = gr.Slider(\n", - " minimum=1,\n", - " maximum=20,\n", - " step=1,\n", - " value=5,\n", - " label=\"Similarity Top-K\",\n", - " info=\"Number of chunks to retrieve\"\n", - " )\n", - " \n", - " # Postprocessor selection\n", - " postprocessor_checkbox = gr.CheckboxGroup(\n", - " choices=[\"SimilarityPostprocessor\", \"None\"],\n", - " value=[\"SimilarityPostprocessor\"],\n", - " label=\"Node Postprocessors\",\n", - " info=\"Filters for retrieved chunks\"\n", - " )\n", - " \n", - " # Similarity cutoff\n", - " similarity_cutoff_slider = gr.Slider(\n", - " minimum=0.0,\n", - " maximum=1.0,\n", - " step=0.1,\n", - " value=0.3,\n", - " label=\"Similarity Cutoff\",\n", - " info=\"Minimum relevance score (0.3 recommended)\"\n", - " )\n", - " \n", - " # Response synthesizer\n", - " synthesizer_dropdown = gr.Dropdown(\n", - " choices=[\"Default\", \"TreeSummarize\", \"Refine\", \"CompactAndRefine\"],\n", - " value=\"Default\",\n", - " label=\"Response Synthesizer\",\n", - " info=\"How to combine retrieved chunks\"\n", - " )\n", - " \n", - " # Right column: Query interface\n", - " with gr.Column(scale=2):\n", - " gr.Markdown(\"#### ๐Ÿ’ฌ Query Interface\")\n", - " \n", - " # Query input\n", - " query_input = gr.Textbox(\n", - " label=\"Your Question\",\n", - " placeholder=\"What would you like to know about the documents?\",\n", - " lines=3\n", - " )\n", - " \n", - " # Submit button\n", - " submit_btn = gr.Button(\"๐Ÿ” Ask Question\", variant=\"primary\", size=\"lg\")\n", - " \n", - " # Response output\n", - " response_output = gr.Textbox(\n", - " label=\"AI Response\",\n", - " placeholder=\"Response will appear here...\",\n", - " interactive=False,\n", - " lines=12\n", - " )\n", - " \n", - " # Configuration display\n", - " config_display = gr.Textbox(\n", - " label=\"Configuration Used\",\n", - " placeholder=\"Configuration details will appear 
here...\",\n", - " interactive=False,\n", - " lines=8\n", - " )\n", - " \n", - " # Connect functions to components\n", - " init_btn.click(initialize_db, outputs=[status_output])\n", - " \n", - " submit_btn.click(\n", - " handle_advanced_query,\n", - " inputs=[\n", - " query_input, model_dropdown, temperature_slider,\n", - " chunk_size_input, chunk_overlap_input, similarity_topk_slider,\n", - " postprocessor_checkbox, similarity_cutoff_slider, synthesizer_dropdown\n", - " ],\n", - " outputs=[response_output, config_display]\n", - " )\n", - " \n", - " return interface\n", - "\n", - "# Create the interface\n", - "print(\"๐ŸŽจ Creating advanced Gradio interface...\")\n", - "advanced_interface = create_advanced_rag_interface()\n", - "print(\"โœ… Advanced RAG interface created successfully!\")\n", - "print(\"\\n๐Ÿ’ก Run the next cell to launch the app!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿš€ Part 4: Launch Your Advanced Application\n", - "\n", - "**What this does:**\n", - "- Starts a local web server with your advanced RAG interface\n", - "- Opens in browser at http://localhost:7860\n", - "- Provides full configurability of RAG parameters\n", - "\n", - "**Testing Strategy:**\n", - "\n", - "### 1. Baseline Test (Default Settings):\n", - "- Initialize database\n", - "- Ask: \"What are AI agents?\"\n", - "- Note the response quality and configuration\n", - "\n", - "### 2. Model Comparison:\n", - "- **Test 1**: gpt-4o-mini, temperature 0.1\n", - "- **Test 2**: gpt-4o, temperature 0.1 (same question)\n", - "- **Compare**: Quality difference vs cost\n", - "\n", - "### 3. Temperature Experiment:\n", - "- **Test 1**: Temperature 0.1 (deterministic)\n", - "- **Test 2**: Temperature 0.9 (creative)\n", - "- **Compare**: Consistency vs creativity\n", - "\n", - "### 4. 
Chunk Size Impact:\n", - "- **Test 1**: Chunk size 256 (fine-grained)\n", - "- **Test 2**: Chunk size 1024 (coarse-grained)\n", - "- **Compare**: Precision vs context\n", - "\n", - "### 5. Synthesizer Comparison:\n", - "- **Test 1**: Default synthesizer\n", - "- **Test 2**: TreeSummarize\n", - "- **Test 3**: Refine\n", - "- **Compare**: Response structure and quality\n", - "\n", - "### 6. Filtering Effects:\n", - "- **Test 1**: Similarity cutoff 0.1 (permissive)\n", - "- **Test 2**: Similarity cutoff 0.7 (strict)\n", - "- **Compare**: Relevance vs completeness" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"๐ŸŽ‰ Launching your Advanced RAG Assistant...\")\n", - "print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n", - "print(\"\")\n", - "print(\"โš ๏ธ Important: Make sure your API key is configured (run first cell if needed)\")\n", - "print(\"\")\n", - "print(\"๐Ÿ“‹ Testing Instructions:\")\n", - "print(\"1. Click 'Initialize Vector Database' button first\")\n", - "print(\"2. Wait for success message (~30-60 seconds)\")\n", - "print(\"3. Configure your RAG parameters in the left column:\")\n", - "print(\" - Choose model (gpt-4o, gpt-4o-mini)\")\n", - "print(\" - Adjust temperature (0.0 = deterministic, 1.0 = creative)\")\n", - "print(\" - Set chunk size and overlap\")\n", - "print(\" - Choose similarity top-k\")\n", - "print(\" - Select postprocessors and synthesizer\")\n", - "print(\"4. Enter a question in the right column\")\n", - "print(\"5. Click 'Ask Question'\")\n", - "print(\"6. 
Review both the response and configuration used\")\n", - "print(\"\")\n", - "print(\"๐Ÿงช Experiments to try:\")\n", - "print(\"- Compare gpt-4o vs gpt-4o-mini with same question\")\n", - "print(\"- Test temperature effects (0.1 vs 0.9)\")\n", - "print(\"- Try different chunk sizes (256 vs 1024)\")\n", - "print(\"- Compare synthesizers (Default vs TreeSummarize vs Refine)\")\n", - "print(\"- Adjust similarity cutoff (0.1 vs 0.7) to see filtering\")\n", - "print(\"\")\n", - "print(\"๐Ÿ’ก Example questions:\")\n", - "print(\"- What are the main topics covered in the documents?\")\n", - "print(\"- Compare and contrast different AI agent architectures\")\n", - "print(\"- How do evaluation metrics work for AI agents?\")\n", - "print(\"\")\n", - "print(\"๐Ÿš€ Launching app...\")\n", - "print(\"\")\n", - "\n", - "# Launch the application\n", - "advanced_interface.launch(\n", - " server_port=7861, # Different port from 3a to avoid conflicts\n", - " share=False, # Set to True for public URL (72 hours)\n", - " inline=False # Set to True to display inline in Jupyter\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## ๐Ÿ’ก Understanding the Configuration Options\n", - "\n", - "### Model Selection\n", - "**What it controls**: Which LLM processes your query and generates the response.\n", - "\n", - "- **gpt-4o**: Latest and most capable\n", - " - โœ… Best quality responses\n", - " - โœ… Better reasoning\n", - " - โŒ More expensive (~$2.50/$10 per 1M tokens)\n", - " - โŒ Slower responses\n", - "\n", - "- **gpt-4o-mini**: Optimized and efficient\n", - " - โœ… Fast responses\n", - " - โœ… Very cheap (~$0.15/$0.60 per 1M tokens)\n", - " - โœ… Good quality for most tasks\n", - " - โŒ Slightly less capable for complex reasoning\n", - "\n", - "**Recommendation**: Start with gpt-4o-mini, upgrade to gpt-4o if quality insufficient.\n", - "\n", - "---\n", - "\n", - "### Temperature (0.0 - 1.0)\n", - "**What it controls**: 
Randomness/creativity in responses.\n", - "\n", - "- **0.0-0.2**: Deterministic, factual\n", - " - โœ… Consistent responses\n", - " - โœ… Best for facts and data\n", - " - โŒ Can be repetitive\n", - "\n", - "- **0.3-0.7**: Balanced\n", - " - โœ… Some variation\n", - " - โœ… Still reliable\n", - " - Good default\n", - "\n", - "- **0.8-1.0**: Creative\n", - " - โœ… More varied responses\n", - " - โœ… Good for brainstorming\n", - " - โŒ Less predictable\n", - " - โŒ May hallucinate\n", - "\n", - "**Recommendation**: 0.1 for factual queries, 0.5-0.7 for creative tasks.\n", - "\n", - "---\n", - "\n", - "### Chunk Size & Overlap\n", - "**What they control**: How documents are split for processing.\n", - "\n", - "**Chunk Size** (typical: 256-1024):\n", - "- **Smaller (256-512)**:\n", - " - โœ… More precise retrieval\n", - " - โœ… Better for finding specific info\n", - " - โŒ May miss broader context\n", - "\n", - "- **Larger (768-1024)**:\n", - " - โœ… More context per chunk\n", - " - โœ… Better for understanding relationships\n", - " - โŒ Less precise\n", - " - โŒ More tokens (higher cost)\n", - "\n", - "**Chunk Overlap** (typical: 10-100):\n", - "- **Purpose**: Prevents splitting sentences/concepts\n", - "- **Trade-off**: More overlap = better context but more redundancy\n", - "- **Rule of thumb**: 10% of chunk size (e.g., 50 for size 512)\n", - "\n", - "**Recommendation**: 512 size + 50 overlap for balanced performance.\n", - "\n", - "---\n", - "\n", - "### Similarity Top-K (1-20)\n", - "**What it controls**: How many document chunks to retrieve.\n", - "\n", - "- **Lower (3-5)**:\n", - " - โœ… Focused, faster\n", - " - โœ… Lower cost\n", - " - โŒ May miss relevant info\n", - "\n", - "- **Higher (8-15)**:\n", - " - โœ… More comprehensive\n", - " - โœ… Less likely to miss relevant info\n", - " - โŒ Slower\n", - " - โŒ Higher cost\n", - " - โŒ More noise\n", - "\n", - "**Recommendation**: 5 for most queries, 10+ for complex analytical questions.\n", - "\n", - 
"---\n", - "\n", - "### Node Postprocessors\n", - "**What they do**: Filter/rerank retrieved chunks before sending to LLM.\n", - "\n", - "**SimilarityPostprocessor**:\n", - "- Removes chunks below similarity cutoff\n", - "- โœ… Improves quality (removes noise)\n", - "- โœ… Reduces cost (fewer tokens)\n", - "- Works with similarity cutoff slider\n", - "\n", - "**Recommendation**: Enable for production use.\n", - "\n", - "---\n", - "\n", - "### Similarity Cutoff (0.0-1.0)\n", - "**What it controls**: Minimum relevance score for postprocessor.\n", - "\n", - "- **Lower (0.1-0.3)**:\n", - " - โœ… More permissive\n", - " - โœ… Includes potentially relevant docs\n", - " - โŒ More noise\n", - "\n", - "- **Higher (0.5-0.8)**:\n", - " - โœ… Only highly relevant docs\n", - " - โœ… Cleaner results\n", - " - โŒ May filter out useful info\n", - "\n", - "**Recommendation**: 0.3 as default, adjust based on results.\n", - "\n", - "---\n", - "\n", - "### Response Synthesizers\n", - "**What they do**: Combine multiple chunks into final answer.\n", - "\n", - "**Default**:\n", - "- โœ… Fast\n", - "- โœ… Simple\n", - "- Good for straightforward queries\n", - "\n", - "**TreeSummarize**:\n", - "- Hierarchical summarization\n", - "- โœ… Best for complex analytical queries\n", - "- โœ… Comprehensive answers\n", - "- โŒ Slower (more API calls)\n", - "- โŒ Higher cost\n", - "\n", - "**Refine**:\n", - "- Iterative improvement\n", - "- โœ… Detailed, thorough answers\n", - "- โœ… Good for building on information\n", - "- โŒ Slowest\n", - "- โŒ Highest cost\n", - "\n", - "**CompactAndRefine**:\n", - "- Balanced version of Refine\n", - "- โœ… Better than Default\n", - "- โœ… Faster than Refine\n", - "- Good middle ground\n", - "\n", - "**Recommendation**: Default for speed, TreeSummarize for quality, CompactAndRefine for balance." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "## โœ… Assignment Completion Checklist\n", - "\n", - "### Implementation:\n", - "- [x] API key configuration added\n", - "- [x] Advanced RAG backend with all methods implemented\n", - "- [x] Gradio interface with all required components:\n", - " - [x] Initialize database button\n", - " - [x] Model selection dropdown\n", - " - [x] Temperature slider\n", - " - [x] Chunk size input\n", - " - [x] Chunk overlap input\n", - " - [x] Similarity top-k slider\n", - " - [x] Node postprocessor checkbox\n", - " - [x] Similarity cutoff slider\n", - " - [x] Response synthesizer dropdown\n", - " - [x] Query input and submit button\n", - " - [x] Response output\n", - " - [x] Configuration display\n", - "- [x] All components connected to backend functions\n", - "- [x] Professional 2-column layout\n", - "\n", - "### Testing:\n", - "- [ ] Database initialization works\n", - "- [ ] All configuration controls update correctly\n", - "- [ ] Queries return responses\n", - "- [ ] Configuration display shows current settings\n", - "- [ ] Tested different models\n", - "- [ ] Tested different temperatures\n", - "- [ ] Tested different chunk sizes\n", - "- [ ] Tested different synthesizers\n", - "- [ ] Tested postprocessor filtering\n", - "\n", - "### Understanding:\n", - "- [ ] Understand how each parameter affects results\n", - "- [ ] Can explain model differences\n", - "- [ ] Can explain temperature effects\n", - "- [ ] Can explain chunking strategies\n", - "- [ ] Can explain synthesizer differences\n", - "- [ ] Can explain postprocessor benefits\n", - "\n", - "---\n", - "\n", - "## ๐ŸŽŠ Congratulations!\n", - "\n", - "You've successfully built a **professional, production-ready RAG application**! 
\n", - "\n", - "### What You Achieved:\n", - "โœ… **Full configurability** - Every RAG parameter exposed and adjustable\n", - "โœ… **Professional UI** - Clean 2-column layout with organized controls\n", - "โœ… **Real-time configuration** - Experiment with settings and see immediate results\n", - "โœ… **Production patterns** - Error handling, validation, configuration display\n", - "โœ… **Advanced features** - Multiple models, synthesizers, postprocessors\n", - "\n", - "### Skills Mastered:\n", - "- Building complex Gradio interfaces with multiple components\n", - "- Dynamic RAG configuration and parameter tuning\n", - "- Professional UI/UX design patterns\n", - "- Production-ready error handling\n", - "- Performance vs quality trade-offs\n", - "\n", - "### What Makes This Production-Ready:\n", - "1. **Comprehensive Configuration** - All parameters tunable\n", - "2. **Error Handling** - Graceful failures with user-friendly messages\n", - "3. **Transparency** - Shows exact configuration used for each query\n", - "4. **Flexibility** - Supports multiple models and strategies\n", - "5. **Professional Design** - Clean, organized, intuitive interface\n", - "\n", - "---\n", - "\n", - "## ๐Ÿš€ Next Steps & Career Applications\n", - "\n", - "### Immediate Enhancements:\n", - "1. **Save/Load Configs** - Store favorite configurations\n", - "2. **Comparison Mode** - Side-by-side results with different configs\n", - "3. **Cost Tracking** - Monitor API costs per query\n", - "4. **Performance Metrics** - Track response times\n", - "5. 
**Export Results** - Download responses as markdown/PDF\n", - "\n", - "### Production Deployment:\n", - "- **Hugging Face Spaces** - Free hosting with GPU support\n", - "- **Docker** - Containerize for scalability\n", - "- **Cloud Platforms** - AWS/GCP/Azure deployment\n", - "- **Authentication** - Add user accounts\n", - "- **Database** - Store queries and configurations\n", - "\n", - "### Portfolio Value:\n", - "This project demonstrates:\n", - "- โœ… Advanced AI/ML application development\n", - "- โœ… Production-ready code quality\n", - "- โœ… Full-stack capabilities (backend + frontend)\n", - "- โœ… Understanding of RAG systems\n", - "- โœ… Professional UI/UX design\n", - "\n", - "### Interview Talking Points:\n", - "- \"Built a configurable RAG system with 8+ tunable parameters\"\n", - "- \"Implemented multiple response synthesis strategies\"\n", - "- \"Created professional web UI with Gradio for ML applications\"\n", - "- \"Optimized for cost vs quality trade-offs\"\n", - "- \"Production-ready with error handling and validation\"\n", - "\n", - "---\n", - "\n", - "**You're now equipped to build sophisticated AI applications!** ๐ŸŽ‰" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "bootcamp", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Asheesh_Ranjan_Srivastava/README.md b/Asheesh_Ranjan_Srivastava/README.md deleted file mode 100644 index 6322621..0000000 --- a/Asheesh_Ranjan_Srivastava/README.md +++ /dev/null @@ -1,206 +0,0 @@ -# ๐Ÿš€ AI Engineering Bootcamp - Sprint Submissions - -**Student:** Asheesh Ranjan Srivastava -**Program:** OutSkill AI Engineering Bootcamp 2025 -**Cohort:** C2 - ---- - -## ๐Ÿ“š Project Index - -### 
๐ŸŽฏ [Day 2: AI-Powered Text Summarization](./Day-2/) -**Built:** Multi-model text summarization system with professional export capabilities - -**Tech Stack:** Python โ€ข Transformers โ€ข Gradio โ€ข BART โ€ข T5 โ€ข Pegasus -**Key Features:** -- 4 AI models with smart caching -- Multi-format export (MD, JSON, Audio, PDF) -- Production-ready architecture -- 83% cost reduction through caching - -**Status:** โœ… Complete | ๐Ÿ“ [View Documentation](./Day-2/README.md) - ---- - -### ๐Ÿค– [Day 3: Multi-Persona Chatbot](./Day-3/) -**Built:** Conversational AI with personality switching and conversation management - -**Tech Stack:** Python โ€ข Streamlit โ€ข OpenAI API โ€ข JSON -**Key Features:** -- 4 distinct AI personas -- Multi-session chat management -- Export conversations (TXT/JSON/CSV) -- Real-time streaming responses - -**Status:** โœ… Complete | ๐Ÿ”— [Live Demo](http://questandcrossfire.com/chatbot) | ๐Ÿ“ [View Documentation](./Day-3/README.md) - ---- - -### โšก [Day 4: Automated LinkedIn Job Application System](./Day-4/) -**Built:** Email-based workflow automation with AI-powered personalization and video generation - -**Tech Stack:** n8n โ€ข Gmail API โ€ข OpenAI (GPT-5, GPT-4o) โ€ข HeyGen API โ€ข Google Sheets -**Key Features:** -- Processes 50+ jobs daily on autopilot -- Multi-model AI strategy (extraction, rating, cover letters, video scripts) -- Profile-matched rating system (0-5 scale) -- Conditional AI video generation for high-match jobs -- Human-AI collaboration case study - -**Status:** โœ… Complete | ๐Ÿ“ [View Documentation](./Day-4/README.md) | ๐ŸŽ“ [Learning Journey](./Day-4/README.md#the-real-learning-journey) - ---- - -### ๐ŸŽฏ [Day 5: Quest And Crossfire LinkedIn AI - Serverless Application](./Day-5/) -**Built:** AI-powered LinkedIn post generator with OAuth 2.0 authentication and serverless architecture - -**Tech Stack:** Vercel Serverless โ€ข n8n AI Agent โ€ข LinkedIn OAuth 2.0 โ€ข JWT โ€ข OpenAI GPT-4o-mini โ€ข Supabase -**Key Features:** -- 
LinkedIn OAuth 2.0 login with JWT session management -- AI post generation with Quest And Crossfire brand voice (200+ line system prompt) -- Direct LinkedIn publishing via Share API -- 3-layer security architecture (frontend gate + backend JWT + email whitelist) -- Private application with email whitelist protection -- Comprehensive documentation (7 files, 6,148 lines) - -**Status:** โœ… Complete | ๐Ÿ”— [Live App](https://quest-crossfire-linkedin-app.vercel.app) | ๐Ÿ’ป [GitHub](https://github.com/AsheeshSrivastava/quest-crossfire-linkedin-app) | ๐Ÿ“ [View Documentation](./Day-5/README.md) - ---- - -### ๐Ÿ“š [Days 6-7: Advanced RAG Application Development](./Day-6/) -**Built:** Complete RAG system with vector databases, advanced retrieval techniques, and production deployment - -> **Note:** The RAG work covered both Day 6 and Day 7 of the bootcamp (no separate Day 7 submission). All four RAG assignments and the enhanced production application are documented in the Day-6 folder. - -**Tech Stack:** LlamaIndex โ€ข LanceDB โ€ข HuggingFace Embeddings โ€ข OpenAI GPT-4o-mini โ€ข Gradio โ€ข Pydantic -**Key Achievements:** -- **Assignment 1:** Vector Database Basics (39 docs, 14,976 embeddings, semantic search) -- **Assignment 2:** Advanced RAG Techniques (SimilarityPostprocessor, TreeSummarize, Pydantic outputs, 50-60% cost savings) -- **Assignment 3a:** Basic Gradio RAG interface -- **Assignment 3b:** Advanced Gradio RAG with parameter tuning -- **Bonus:** Enhanced production application deployed to HuggingFace Spaces - -**Status:** โœ… Complete (Days 6-7, 4/4 Assignments) | ๐Ÿš€ [Live Demo](https://huggingface.co/spaces/asheeshsrivastava9/QnC) | ๐Ÿ“ [View Documentation](./Day-6/README.md) - ---- - -## ๐Ÿ› ๏ธ Skills Demonstrated - -**AI/ML:** -- Transformer model integration -- Prompt engineering (200+ line system prompts) -- Multi-model architecture -- RAG (Retrieval-Augmented Generation) systems -- Vector databases & semantic search -- Advanced retrieval techniques 
(SimilarityPostprocessor, TreeSummarize) -- Pydantic structured outputs -- System design for AI applications -- AI collaboration & orchestration - -**Automation & Integration:** -- n8n workflow automation -- Gmail API integration (OAuth2) -- LinkedIn API (OAuth 2.0 + Share API) -- Multi-API orchestration -- Scheduled task automation -- Error handling & graceful degradation - -**Backend:** -- OpenAI API integration (GPT-5, GPT-4o, GPT-4o-mini) -- HeyGen video generation API -- LlamaIndex RAG framework -- LanceDB vector database -- HuggingFace Transformers & Embeddings -- Vercel serverless functions -- JWT session management -- Session state management -- Caching strategies -- Base64 decoding & HTML parsing - -**Frontend:** -- Streamlit web apps -- Gradio interfaces (basic + advanced parameter tuning) -- Real-time streaming -- User interaction design -- Multi-tab interfaces - -**Security:** -- OAuth 2.0 implementation (LinkedIn) -- JWT authentication & session management -- 3-layer security architecture (frontend + backend + whitelist) -- Email whitelist protection -- Rate limiting (10/hour, 20/session) -- API key validation & format checking -- HTTP-only secure cookies -- Environment variable management - -**Professional Practices:** -- Comprehensive documentation -- Version control (Git) -- Open source licensing -- Security best practices (credentials, .gitignore) -- Professional code structure -- Human-AI collaboration transparency -- Systems thinking & first principles -- Cost optimization strategies (50-60% token savings) - ---- - -## ๐Ÿ“Š Progress Tracker - -| Sprint | Day | Status | Projects | -|--------|-----|--------|----------| -| **Sprint 1** | Day 1 | โณ Prep | Bootcamp orientation | -| **Sprint 1** | Day 2 | โœ… Complete | Text Summarization MVP | -| **Sprint 1** | Day 3 | โœ… Complete | Multi-Persona Chatbot | -| **Sprint 1** | Day 4 | โœ… Complete | LinkedIn Job Automation (n8n workflow) | -| **Sprint 2** | Day 5 | โœ… Complete | Quest And Crossfire 
LinkedIn AI (Serverless App) | -| **Sprint 3** | Days 6-7 | โœ… Complete | Advanced RAG Application (4 Assignments + Production Deployment) | -| **Sprint 4** | Day 8+ | ๐Ÿ”„ Upcoming | Coming soon... | - ---- - -## ๐Ÿ”— Quick Links - -**Live Deployments:** -- [Quest And Crossfire LinkedIn AI](https://quest-crossfire-linkedin-app.vercel.app) - Day 5 serverless app -- [Aethelgard Concept Generator](https://huggingface.co/spaces/asheeshsrivastava9/QnC) - Day 6 enhanced RAG app -- [Multi-Persona Chatbot](http://questandcrossfire.com/chatbot) - Day 3 project -- [Obsidian AI Assistant](http://questandcrossfire.com/obsidian) *(Portfolio project)* - -**GitHub Repositories:** -- [Quest And Crossfire LinkedIn AI](https://github.com/AsheeshSrivastava/quest-crossfire-linkedin-app) - Full source code & documentation -- [Aethelgard Concept Generator](https://github.com/AsheeshSrivastava/aethelgard-concept-generator) - Production RAG application - -**Documentation:** -- [Day 2 README](./Day-2/README.md) - Text Summarization -- [Day 3 README](./Day-3/README.md) - Multi-Persona Chatbot -- [Day 4 README](./Day-4/README.md) - LinkedIn Job Automation - - [Setup Guide](./Day-4/SETUP.md) - - [Credentials Guide](./Day-4/CREDENTIALS.md) - - [File Summary](./Day-4/FILE_SUMMARY.md) -- [Day 5 README](./Day-5/README.md) - Quest And Crossfire LinkedIn AI -- [Days 6-7 README](./Day-6/README.md) - Advanced RAG Application Development - ---- - -## ๐Ÿ’ก Project Philosophy - -Each project demonstrates: -- โœ… Production-ready code -- โœ… Comprehensive documentation -- โœ… Security best practices -- โœ… Real-world applicability -- โœ… Continuous learning mindset - ---- - -## ๐Ÿ“ฌ Connect - -Building AI applications and learning in public. 
- -*Part of QUEST AND CROSSFIREโ„ข* - ---- - -**Last Updated:** November 3, 2025 | Sprint 3 Complete (Days 6-7) diff --git a/Ashish_Sahu/Day_02/Day_2.ipynb b/Ashish_Sahu/Day_02/Day_2.ipynb deleted file mode 100644 index c0fc26e..0000000 --- a/Ashish_Sahu/Day_02/Day_2.ipynb +++ /dev/null @@ -1,2616 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [], - "gpuType": "T4" - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU", - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "727909cd5ab340b4ac9b600190c64057": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_3e3c0948c9974a628543534f7d04aafd", - "IPY_MODEL_c8ecbd20b48a4c2e9620b7574ce69052", - "IPY_MODEL_19acb22b900e4075bc696995910b649b" - ], - "layout": "IPY_MODEL_97b38d20387247c1bb5cd4b6a1e32d02" - } - }, - "3e3c0948c9974a628543534f7d04aafd": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_9bfb9457e43643a3b1765703910cfb22", - "placeholder": "โ€‹", - "style": "IPY_MODEL_901a4a7371494aebbaab488d4f08711c", - "value": "config.json:โ€‡" - } - }, - 
"c8ecbd20b48a4c2e9620b7574ce69052": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_e9f260a1cd1441a68321934af091504d", - "max": 1, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_89da6fbc604442e2afe3ac895bcd9bbe", - "value": 1 - } - }, - "19acb22b900e4075bc696995910b649b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_36da726c039b461193e73ae64718df6e", - "placeholder": "โ€‹", - "style": "IPY_MODEL_5544182a7dfb410fa709806d45e70b7a", - "value": "โ€‡1.80k/?โ€‡[00:00<00:00,โ€‡102kB/s]" - } - }, - "97b38d20387247c1bb5cd4b6a1e32d02": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - 
"grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "9bfb9457e43643a3b1765703910cfb22": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "901a4a7371494aebbaab488d4f08711c": { - "model_module": "@jupyter-widgets/controls", - 
"model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "e9f260a1cd1441a68321934af091504d": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": "20px" - } - }, - "89da6fbc604442e2afe3ac895bcd9bbe": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - 
"_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "36da726c039b461193e73ae64718df6e": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "5544182a7dfb410fa709806d45e70b7a": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "5e31229135d1402e8d0e39f29a4e102b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - 
"_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_f9dd9fe4fbe24c2e8984d0f21b929c95", - "IPY_MODEL_63e5a710ca9340aaa33c57d0a091111a", - "IPY_MODEL_5975a2e8705e4f59832530bcd5794735" - ], - "layout": "IPY_MODEL_d3063a4fab984425b599a684a311a6ba" - } - }, - "f9dd9fe4fbe24c2e8984d0f21b929c95": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_3ca2033e59b048a5b50b227e0fb41bcf", - "placeholder": "โ€‹", - "style": "IPY_MODEL_333745180c194ad0b5ea80c6f37f6c74", - "value": "pytorch_model.bin:โ€‡100%" - } - }, - "63e5a710ca9340aaa33c57d0a091111a": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_9859f5e8da6944d49758dd5cc668092f", - "max": 1222317369, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_95a9dd3a768c4f5ab22c72a92a884fed", - "value": 1222317369 - } - }, - "5975a2e8705e4f59832530bcd5794735": { - "model_module": 
"@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_0f1c304f84294c3abdd2238486d12d60", - "placeholder": "โ€‹", - "style": "IPY_MODEL_0085ef8dc8634205a78d50cbf38f2332", - "value": "โ€‡1.22G/1.22Gโ€‡[00:24<00:00,โ€‡104MB/s]" - } - }, - "d3063a4fab984425b599a684a311a6ba": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "3ca2033e59b048a5b50b227e0fb41bcf": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - 
"model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "333745180c194ad0b5ea80c6f37f6c74": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "9859f5e8da6944d49758dd5cc668092f": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", 
- "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "95a9dd3a768c4f5ab22c72a92a884fed": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "0f1c304f84294c3abdd2238486d12d60": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - 
"grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "0085ef8dc8634205a78d50cbf38f2332": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "1d31b2abe3c34dbf96c6fdd26af3ec86": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_60155bef0a9446339cd8cde59d3c336c", - "IPY_MODEL_6512db6ab093427a877ce49fbb326d0b", - "IPY_MODEL_2e37582e42334592a41acf9f2841eb9c" - ], - "layout": "IPY_MODEL_3d7a7eb303ff424cb144711331b92b1b" - } - }, - "60155bef0a9446339cd8cde59d3c336c": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - 
"_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_f4efa10a0add4678866c12f4f2b149f0", - "placeholder": "โ€‹", - "style": "IPY_MODEL_b133bf4e00b7456fa0ea9e4a72085ecf", - "value": "model.safetensors:โ€‡100%" - } - }, - "6512db6ab093427a877ce49fbb326d0b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_9c18cd4ec92c4023a81f90e5f38e630d", - "max": 1222284424, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_e637391630b04600b49b10bdbb8d28de", - "value": 1222284424 - } - }, - "2e37582e42334592a41acf9f2841eb9c": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_739557f864db486da811fa5862378fb1", - "placeholder": "โ€‹", - "style": "IPY_MODEL_8d679889f4e64ca9871f86118838bb86", - "value": "โ€‡1.22G/1.22Gโ€‡[00:19<00:00,โ€‡29.4MB/s]" - } - }, - "3d7a7eb303ff424cb144711331b92b1b": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - 
"_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "f4efa10a0add4678866c12f4f2b149f0": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - 
"margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "b133bf4e00b7456fa0ea9e4a72085ecf": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "9c18cd4ec92c4023a81f90e5f38e630d": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - 
"visibility": null, - "width": null - } - }, - "e637391630b04600b49b10bdbb8d28de": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "739557f864db486da811fa5862378fb1": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "8d679889f4e64ca9871f86118838bb86": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - 
"_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "b1e614f871344ea1ac4517b9202bcc7a": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_e2a5633f4b684038b91434eb7f803ab4", - "IPY_MODEL_4e75c85166f9450382676506d7b759f2", - "IPY_MODEL_fa5b3b555ac2463c8c6676bd38611982" - ], - "layout": "IPY_MODEL_cc879ed7f8c6406794ee10390cfbbc1e" - } - }, - "e2a5633f4b684038b91434eb7f803ab4": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_f3c09ad6e82843809184ceec27985099", - "placeholder": "โ€‹", - "style": "IPY_MODEL_afc54157d6344cf2a2ed94e8212e1c35", - "value": "tokenizer_config.json:โ€‡100%" - } - }, - "4e75c85166f9450382676506d7b759f2": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - 
"_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_6a706d9e11684f23874587116e38ce94", - "max": 26, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_81bb7600318e4ec7a6112859b6769832", - "value": 26 - } - }, - "fa5b3b555ac2463c8c6676bd38611982": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_87da78c75381482bb3bd55792a2d6bda", - "placeholder": "โ€‹", - "style": "IPY_MODEL_e0f35f2f40e44ea3b3b700bee0bdceb5", - "value": "โ€‡26.0/26.0โ€‡[00:00<00:00,โ€‡1.94kB/s]" - } - }, - "cc879ed7f8c6406794ee10390cfbbc1e": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - 
"min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "f3c09ad6e82843809184ceec27985099": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "afc54157d6344cf2a2ed94e8212e1c35": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - 
"6a706d9e11684f23874587116e38ce94": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "81bb7600318e4ec7a6112859b6769832": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "87da78c75381482bb3bd55792a2d6bda": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - 
"_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "e0f35f2f40e44ea3b3b700bee0bdceb5": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "dc4c9797fc08481eba1cb1a1ab0d7aa5": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_f54a2f74dc0f4f57bbfb2ff38873e6f9", - "IPY_MODEL_5e78d953d73241ae88dcbb76afb4f69c", 
- "IPY_MODEL_92bb10a46d82469b8e7350cc568b38cb" - ], - "layout": "IPY_MODEL_dbd33a0d122b4750ad73a54fb3d0cbe1" - } - }, - "f54a2f74dc0f4f57bbfb2ff38873e6f9": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_64909850690649f18bc089ef06c96331", - "placeholder": "โ€‹", - "style": "IPY_MODEL_f265f11bbd9946cd8cfb2f2d1a0edfd7", - "value": "vocab.json:โ€‡" - } - }, - "5e78d953d73241ae88dcbb76afb4f69c": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_46fa1fb557884eaaa13c94f4aabd853c", - "max": 1, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_434f5c4a9a5c43a68ebb5d3d150dc6c9", - "value": 1 - } - }, - "92bb10a46d82469b8e7350cc568b38cb": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - 
"layout": "IPY_MODEL_b84d67bfea1343678a2631a12f03111e", - "placeholder": "โ€‹", - "style": "IPY_MODEL_ed680a13028341afbb1f82b4d63f031d", - "value": "โ€‡899k/?โ€‡[00:00<00:00,โ€‡2.11MB/s]" - } - }, - "dbd33a0d122b4750ad73a54fb3d0cbe1": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "64909850690649f18bc089ef06c96331": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - 
"flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "f265f11bbd9946cd8cfb2f2d1a0edfd7": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "46fa1fb557884eaaa13c94f4aabd853c": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - 
"height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": "20px" - } - }, - "434f5c4a9a5c43a68ebb5d3d150dc6c9": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "b84d67bfea1343678a2631a12f03111e": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": 
null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "ed680a13028341afbb1f82b4d63f031d": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "5797ecbe945949fbbfb6c668f52d336a": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_f564438a6b024a0688953df9c89e81b2", - "IPY_MODEL_d29ab5f7ca21464a8b6d9c13f7d2ffdd", - "IPY_MODEL_11e20fd675164237ba5db7b4d67668d3" - ], - "layout": "IPY_MODEL_c133625545864ab7b2c4996bb6018eee" - } - }, - "f564438a6b024a0688953df9c89e81b2": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_6049e4f95408480bba374589f15d86d9", - "placeholder": "โ€‹", - "style": "IPY_MODEL_60dab349c56a4cafa52cfbee8d878420", - "value": "merges.txt:โ€‡" - } - }, - "d29ab5f7ca21464a8b6d9c13f7d2ffdd": { - 
"model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_30644bbd2538430c9f0eaace80230bb0", - "max": 1, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_1838bfb8f6af46fdb1e7ddc0ea5ed274", - "value": 1 - } - }, - "11e20fd675164237ba5db7b4d67668d3": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_e3ffcc0238c1415ab9a3f84b08e928b7", - "placeholder": "โ€‹", - "style": "IPY_MODEL_fbb3b043790f4ee1a90e8ab7148054c9", - "value": "โ€‡456k/?โ€‡[00:00<00:00,โ€‡1.45MB/s]" - } - }, - "c133625545864ab7b2c4996bb6018eee": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - 
"grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6049e4f95408480bba374589f15d86d9": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "60dab349c56a4cafa52cfbee8d878420": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - 
"model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "30644bbd2538430c9f0eaace80230bb0": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": "20px" - } - }, - "1838bfb8f6af46fdb1e7ddc0ea5ed274": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": 
"StyleView", - "bar_color": null, - "description_width": "" - } - }, - "e3ffcc0238c1415ab9a3f84b08e928b7": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "fbb3b043790f4ee1a90e8ab7148054c9": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - } - } - } - }, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "GRADIO SYNTAX" - ], - "metadata": { - "id": "qBdN3hkkfH6R" - } - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": 
"wR4y7UudewDw", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 611 - }, - "outputId": "8b0008d5-9b0d-49d8-8b75-339157d94f3c" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n", - "* Running on public URL: https://a3691cc43fc782fcb1.gradio.live\n", - "\n", - "This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n" - ] - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "
" - ] - }, - "metadata": {} - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [] - }, - "metadata": {}, - "execution_count": 1 - } - ], - "source": [ - "import gradio as gr\n", - "\n", - "def greet(name):\n", - " return \"Hello, \" + name + \"!\"\n", - "\n", - "demo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n", - "\n", - "# To create a shareable link (valid for 72 hours)\n", - "demo.launch(share=True)" - ] - }, - { - "cell_type": "markdown", - "source": [ - "HUGGING FACE TOKEN LOGISTICS" - ], - "metadata": { - "id": "53iZXyE0fFTI" - } - }, - { - "cell_type": "code", - "source": [ - "!pip install huggingface_hub\n", - "\n", - "from huggingface_hub import whoami\n", - "from google.colab import userdata\n", - "\n", - "# Get your Hugging Face token from Colab Secrets\n", - "hf_token = userdata.get('HF_TOKEN')\n", - "\n", - "# Verify the token by checking your identity\n", - "try:\n", - " user_info = whoami(token=hf_token)\n", - " print(f\"Logged in as: {user_info['name']}\")\n", - "except Exception as e:\n", - " print(f\"Could not log in: {e}\")\n", - " print(\"Please make sure you have added your Hugging Face token to Colab Secrets with the name 'HF_TOKEN'\")" - ], - "metadata": { - "id": "BvuK-FRUe4sz", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "e502fb54-c978-404d-f272-b48927a5beda" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Requirement already satisfied: huggingface_hub in /usr/local/lib/python3.12/dist-packages (0.35.3)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (3.20.0)\n", - "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (2025.3.0)\n", - "Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (25.0)\n", - "Requirement already 
satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (6.0.3)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (2.32.4)\n", - "Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (4.67.1)\n", - "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (4.15.0)\n", - "Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (1.1.10)\n", - "Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (3.4.4)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (3.11)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (2.5.0)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (2025.10.5)\n", - "Logged in as: ashisa\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "IF YOU WISH TO LOAD SOME DATASET TO TEST ANYTHING" - ], - "metadata": { - "id": "SJ7RyQEXfKqf" - } - }, - { - "cell_type": "code", - "source": [ - "from datasets import load_dataset\n", - "\n", - "# Load a dataset (e.g., the SQuAD dataset for question answering)\n", - "dataset = load_dataset(\"squad\")\n", - "\n", - "# Print information about the dataset\n", - "print(dataset)\n", - "\n", - "# Access an example from the training set\n", - "print(\"\\nExample from the training set:\")\n", - "print(dataset[\"train\"][0])" - ], - "metadata": { - "id": "kKofrkg0fCEM" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "source": [ - "SAMPLE SUMMARISATION CODE" - ], - "metadata": { 
- "id": "ixTksrfyfNwe" - } - }, - { - "cell_type": "code", - "source": [ - "from transformers import pipeline\n", - "\n", - "# Load the summarization pipeline\n", - "summarizer = pipeline(\"summarization\")\n", - "\n", - "# Text to summarize\n", - "text = \"\"\"\n", - "Hugging Face is a company and open-source platform that provides tools and models for natural language processing (NLP). It has become a central hub for the ML community, offering a wide range of pre-trained models that can be easily used or fine-tuned for specific applications. Key aspects of Hugging Face include the Transformers library, Model Hub, Datasets library, and Tokenizers library. Hugging Face democratizes access to powerful ML models, making it easier for developers and researchers to build and deploy applications.\n", - "\"\"\"\n", - "\n", - "# Summarize the text\n", - "summary = summarizer(text, max_length=50, min_length=25, do_sample=False)\n", - "\n", - "print(\"Original Text:\")\n", - "print(text)\n", - "print(\"\\nSummary:\")\n", - "print(summary[0]['summary_text'])" - ], - "metadata": { - "id": "x6UgM-Rse5dw", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 403, - "referenced_widgets": [ - "727909cd5ab340b4ac9b600190c64057", - "3e3c0948c9974a628543534f7d04aafd", - "c8ecbd20b48a4c2e9620b7574ce69052", - "19acb22b900e4075bc696995910b649b", - "97b38d20387247c1bb5cd4b6a1e32d02", - "9bfb9457e43643a3b1765703910cfb22", - "901a4a7371494aebbaab488d4f08711c", - "e9f260a1cd1441a68321934af091504d", - "89da6fbc604442e2afe3ac895bcd9bbe", - "36da726c039b461193e73ae64718df6e", - "5544182a7dfb410fa709806d45e70b7a", - "5e31229135d1402e8d0e39f29a4e102b", - "f9dd9fe4fbe24c2e8984d0f21b929c95", - "63e5a710ca9340aaa33c57d0a091111a", - "5975a2e8705e4f59832530bcd5794735", - "d3063a4fab984425b599a684a311a6ba", - "3ca2033e59b048a5b50b227e0fb41bcf", - "333745180c194ad0b5ea80c6f37f6c74", - "9859f5e8da6944d49758dd5cc668092f", - "95a9dd3a768c4f5ab22c72a92a884fed", - 
"0f1c304f84294c3abdd2238486d12d60", - "0085ef8dc8634205a78d50cbf38f2332", - "1d31b2abe3c34dbf96c6fdd26af3ec86", - "60155bef0a9446339cd8cde59d3c336c", - "6512db6ab093427a877ce49fbb326d0b", - "2e37582e42334592a41acf9f2841eb9c", - "3d7a7eb303ff424cb144711331b92b1b", - "f4efa10a0add4678866c12f4f2b149f0", - "b133bf4e00b7456fa0ea9e4a72085ecf", - "9c18cd4ec92c4023a81f90e5f38e630d", - "e637391630b04600b49b10bdbb8d28de", - "739557f864db486da811fa5862378fb1", - "8d679889f4e64ca9871f86118838bb86", - "b1e614f871344ea1ac4517b9202bcc7a", - "e2a5633f4b684038b91434eb7f803ab4", - "4e75c85166f9450382676506d7b759f2", - "fa5b3b555ac2463c8c6676bd38611982", - "cc879ed7f8c6406794ee10390cfbbc1e", - "f3c09ad6e82843809184ceec27985099", - "afc54157d6344cf2a2ed94e8212e1c35", - "6a706d9e11684f23874587116e38ce94", - "81bb7600318e4ec7a6112859b6769832", - "87da78c75381482bb3bd55792a2d6bda", - "e0f35f2f40e44ea3b3b700bee0bdceb5", - "dc4c9797fc08481eba1cb1a1ab0d7aa5", - "f54a2f74dc0f4f57bbfb2ff38873e6f9", - "5e78d953d73241ae88dcbb76afb4f69c", - "92bb10a46d82469b8e7350cc568b38cb", - "dbd33a0d122b4750ad73a54fb3d0cbe1", - "64909850690649f18bc089ef06c96331", - "f265f11bbd9946cd8cfb2f2d1a0edfd7", - "46fa1fb557884eaaa13c94f4aabd853c", - "434f5c4a9a5c43a68ebb5d3d150dc6c9", - "b84d67bfea1343678a2631a12f03111e", - "ed680a13028341afbb1f82b4d63f031d", - "5797ecbe945949fbbfb6c668f52d336a", - "f564438a6b024a0688953df9c89e81b2", - "d29ab5f7ca21464a8b6d9c13f7d2ffdd", - "11e20fd675164237ba5db7b4d67668d3", - "c133625545864ab7b2c4996bb6018eee", - "6049e4f95408480bba374589f15d86d9", - "60dab349c56a4cafa52cfbee8d878420", - "30644bbd2538430c9f0eaace80230bb0", - "1838bfb8f6af46fdb1e7ddc0ea5ed274", - "e3ffcc0238c1415ab9a3f84b08e928b7", - "fbb3b043790f4ee1a90e8ab7148054c9" - ] - }, - "outputId": "5fa23acf-d35b-4dba-a8c0-0395a27d79ba" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "No model was supplied, defaulted to sshleifer/distilbart-cnn-12-6 and revision 
a4f8f3e (https://huggingface.co/sshleifer/distilbart-cnn-12-6).\n", - "Using a pipeline without specifying a model name and revision in production is not recommended.\n" - ] - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "config.json: 0.00B [00:00, ?B/s]" - ], - "application/vnd.jupyter.widget-view+json": { - "version_major": 2, - "version_minor": 0, - "model_id": "727909cd5ab340b4ac9b600190c64057" - } - }, - "metadata": {} - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "pytorch_model.bin: 0%| | 0.00/1.22G [00:00=2.27 in /usr/local/lib/python3.12/dist-packages (from gtts) (2.32.4)\n", - "Collecting click<8.2,>=7.1 (from gtts)\n", - " Downloading click-8.1.8-py3-none-any.whl.metadata (2.3 kB)\n", - "Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3,>=2.27->gtts) (3.4.4)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests<3,>=2.27->gtts) (3.11)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3,>=2.27->gtts) (2.5.0)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests<3,>=2.27->gtts) (2025.10.5)\n", - "Downloading gTTS-2.5.4-py3-none-any.whl (29 kB)\n", - "Downloading click-8.1.8-py3-none-any.whl (98 kB)\n", - "\u001b[?25l \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m0.0/98.2 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m98.2/98.2 kB\u001b[0m \u001b[31m7.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hInstalling collected packages: click, gtts\n", - " Attempting uninstall: 
click\n", - " Found existing installation: click 8.3.0\n", - " Uninstalling click-8.3.0:\n", - " Successfully uninstalled click-8.3.0\n", - "Successfully installed click-8.1.8 gtts-2.5.4\n" - ] - }, - { - "output_type": "display_data", - "data": { - "application/vnd.colab-display-data+json": { - "pip_warning": { - "packages": [ - "click" - ] - }, - "id": "b530fc6558744f1aa885e1a067a35018" - } - }, - "metadata": {} - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "ASSIGNMENT GOES BELOW -" - ], - "metadata": { - "id": "2uRw9YNZfP13" - } - }, - { - "cell_type": "code", - "source": [ - "import gradio as gr\n", - "from transformers import pipeline\n", - "import numpy as np\n", - "from gtts import gTTS\n", - "\n", - "# Load summarization pipeline\n", - "summarizer = pipeline(\"summarization\", model=\"facebook/bart-large-cnn\")\n", - "\n", - "# Load smaller text-to-speech model\n", - "tts = pipeline(\"text-to-speech\", model=\"suno/bark-small\")\n", - "\n", - "def summarize(text):\n", - " # Generate summary\n", - " summary = summarizer(text, max_length=50, min_length=25, do_sample=False)\n", - " summary_text = summary[0]['summary_text']\n", - "\n", - " tts = gTTS(summary_text)\n", - " tts.save(\"summary.wav\")\n", - "\n", - " # Save summary to text file\n", - " with open(\"summary.txt\", \"w\") as f:\n", - " f.write(summary_text)\n", - "\n", - " # Return summary text, file path, and audio file\n", - " return summary_text, \"summary.txt\", \"summary.wav\"\n", - "\n", - "# Gradio UI\n", - "demo = gr.Interface(\n", - " fn=summarize,\n", - " inputs=gr.Textbox(lines=10, label=\"Enter text to summarize\"),\n", - " outputs=[\n", - " gr.Textbox(label=\"Summary\"),\n", - " gr.File(label=\"Download Summary\"),\n", - " gr.Audio(label=\"Listen to Summary\")\n", - " ],\n", - " title=\"Text Summarizer with TTS (Lightweight Model)\"\n", - ")\n", - "\n", - "demo.launch(share=True, debug=True)\n" - ], - "metadata": { - "id": "bCAb092HfRrj", - "colab": { - "base_uri": 
"https://localhost:8080/", - "height": 680 - }, - "outputId": "6d347c37-afd6-44d6-9ac6-0c805843cbf1" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "Device set to use cuda:0\n", - "Device set to use cuda:0\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n", - "* Running on public URL: https://0080c6c2e8d448c970.gradio.live\n", - "\n", - "This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n" - ] - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "
" - ] - }, - "metadata": {} - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Keyboard interruption in main thread... closing server.\n", - "Killing tunnel 127.0.0.1:7860 <> https://0080c6c2e8d448c970.gradio.live\n" - ] - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [] - }, - "metadata": {}, - "execution_count": 1 - } - ] - } - ] -} \ No newline at end of file diff --git a/Ashish_Sahu/Day_02/Readme.md b/Ashish_Sahu/Day_02/Readme.md deleted file mode 100644 index 6573313..0000000 --- a/Ashish_Sahu/Day_02/Readme.md +++ /dev/null @@ -1,5 +0,0 @@ -# Day 2 Submission - -## Text Summarizer - -The assignment submission for day 2 - Gradio app that summarizes given text, provides download functionality and also generates an audio file for the summary. diff --git a/Ashish_Sahu/Day_03/app.py b/Ashish_Sahu/Day_03/app.py deleted file mode 100644 index f100e75..0000000 --- a/Ashish_Sahu/Day_03/app.py +++ /dev/null @@ -1,546 +0,0 @@ -import streamlit as st -from datetime import datetime -from utils.config_manager import load_config, save_config, get_active_model, get_provider_config -from utils.file_processor import process_uploaded_file -from utils.llm_provider import LLMProvider, prepare_messages_with_files -from utils.chat_history import save_chat, load_chat, list_chats, delete_chat, generate_chat_title -from utils.personality_prompts import get_personality_prompt, get_personality_descriptions -from utils.export_manager import export_to_txt, export_to_json, export_to_csv, generate_filename, calculate_statistics - -st.set_page_config( - page_title="ChatGPT-like App", - page_icon="๐Ÿ’ฌ", - layout="wide", - initial_sidebar_state="expanded" -) - -if 'messages' not in st.session_state: - st.session_state.messages = [] - -if 'uploaded_files' not in st.session_state: - st.session_state.uploaded_files = [] - -if 'config' not in st.session_state: - st.session_state.config = load_config() - -if 'current_chat_id' not in st.session_state: - 
st.session_state.current_chat_id = None - -if 'current_chat_title' not in st.session_state: - st.session_state.current_chat_title = "New Chat" - -if 'pending_file_uploads' not in st.session_state: - st.session_state.pending_file_uploads = [] - -if 'translation_mode' not in st.session_state: - st.session_state.translation_mode = False - -if 'target_language' not in st.session_state: - st.session_state.target_language = "English" - -if 'personality_mode' not in st.session_state: - st.session_state.personality_mode = "Professional" - -if 'custom_personality' not in st.session_state: - st.session_state.custom_personality = "" - -def initialize_session_from_config(): - config = st.session_state.config - - if 'provider' not in st.session_state: - st.session_state.provider = config['settings']['provider'] - - if 'mode' not in st.session_state: - st.session_state.mode = config['settings']['mode'] - - if 'temperature' not in st.session_state: - st.session_state.temperature = config['settings']['temperature'] - - if 'max_tokens' not in st.session_state: - st.session_state.max_tokens = config['settings']['max_tokens'] - - if 'custom_instructions' not in st.session_state: - st.session_state.custom_instructions = config['settings']['custom_instructions'] - - if 'translation_mode' not in st.session_state: - st.session_state.translation_mode = config['settings'].get('translation_mode', False) - - if 'target_language' not in st.session_state: - st.session_state.target_language = config['settings'].get('target_language', 'English') - - if 'personality_mode' not in st.session_state: - st.session_state.personality_mode = config['settings'].get('personality_mode', 'Professional') - - if 'custom_personality' not in st.session_state: - st.session_state.custom_personality = config['settings'].get('custom_personality', '') - -initialize_session_from_config() - -with st.sidebar: - st.title("โš™๏ธ Settings") - - with st.expander("๐ŸŽญ AI Personality", expanded=False): - 
personality_descriptions = get_personality_descriptions() - - personality_mode = st.selectbox( - "Select Personality Mode", - ["Professional", "Creative", "Technical", "Friendly", "Custom"], - index=["Professional", "Creative", "Technical", "Friendly", "Custom"].index(st.session_state.personality_mode), - key="personality_select", - help="Choose the AI personality that best fits your needs" - ) - - previous_personality = st.session_state.personality_mode - st.session_state.personality_mode = personality_mode - st.session_state.config['settings']['personality_mode'] = personality_mode - - st.caption(personality_descriptions.get(personality_mode, "")) - - if personality_mode == "Custom": - custom_personality = st.text_area( - "Custom Personality Instructions", - value=st.session_state.custom_personality, - height=200, - key="custom_personality_input", - placeholder="Define your custom AI personality here. Describe the style, tone, expertise, and approach you want the AI to use...", - help="Create your own AI personality by describing how you want it to respond" - ) - st.session_state.custom_personality = custom_personality - st.session_state.config['settings']['custom_personality'] = custom_personality - - if not custom_personality.strip(): - st.warning("โš ๏ธ Custom personality is empty. The AI will use default behavior.") - else: - personality_info = get_personality_prompt(personality_mode) - with st.expander("โ„น๏ธ View Personality Details", expanded=False): - st.markdown(f"**Name:** {personality_info['name']}") - st.markdown(f"**Expertise:** {personality_info['expertise']}") - st.markdown(f"**Example Response:** *{personality_info['example']}*") - - if personality_mode != previous_personality: - st.info("๐Ÿ’ก Personality changed! 
The new personality will apply to your next message.") - - with st.expander("๐ŸŒ Translation Settings", expanded=False): - translation_mode = st.toggle( - "Enable Translation Mode", - value=st.session_state.translation_mode, - key="translation_toggle", - help="When enabled, automatically detects language and translates to your target language" - ) - - previous_language = st.session_state.target_language - st.session_state.translation_mode = translation_mode - st.session_state.config['settings']['translation_mode'] = translation_mode - - if translation_mode: - st.info("Translation mode is active. Every message will be translated to your target language with cultural context.") - - languages = [ - "English", "Spanish", "French", "German", "Italian", "Portuguese", - "Russian", "Japanese", "Chinese (Simplified)", "Chinese (Traditional)", - "Korean", "Arabic", "Hindi", "Turkish", "Dutch", "Polish", - "Swedish", "Norwegian", "Danish", "Finnish", "Greek", "Hebrew", - "Thai", "Vietnamese", "Indonesian", "Malay", "Tagalog" - ] - - target_language = st.selectbox( - "Target Language", - languages, - index=languages.index(st.session_state.target_language) if st.session_state.target_language in languages else 0, - key="target_lang_select" - ) - - if target_language != previous_language: - st.session_state.messages = [] - st.session_state.uploaded_files = [] - st.session_state.current_chat_id = None - st.session_state.current_chat_title = "New Chat" - - st.session_state.target_language = target_language - st.session_state.config['settings']['target_language'] = target_language - - with st.expander("๐Ÿ“ Custom Instructions", expanded=False): - custom_instructions = st.text_area( - "Custom Instructions (System Prompt)", - value=st.session_state.custom_instructions, - height=150, - key="custom_inst", - disabled=st.session_state.translation_mode, - help="Disabled when translation mode is active" if st.session_state.translation_mode else "" - ) - if not 
st.session_state.translation_mode: - st.session_state.custom_instructions = custom_instructions - st.session_state.config['settings']['custom_instructions'] = custom_instructions - - char_count = len(custom_instructions) - st.caption(f"Characters: {char_count}") - - with st.expander("๐Ÿ”Œ Provider Configuration", expanded=False): - provider = st.radio( - "Select Provider", - ["openrouter", "ollama"], - index=0 if st.session_state.provider == "openrouter" else 1, - key="provider_radio" - ) - st.session_state.provider = provider - - if provider == "openrouter": - st.subheader("OpenRouter") - base_url = st.text_input( - "Base URL", - value=st.session_state.config['openrouter']['base_url'], - key="or_base_url" - ) - api_key = st.text_input( - "API Key", - value=st.session_state.config['openrouter']['api_key'], - type="password", - key="or_api_key" - ) - mini_model = st.text_input( - "Mini Model", - value=st.session_state.config['openrouter']['mini_model'], - key="or_mini" - ) - thinking_model = st.text_input( - "Thinking Model", - value=st.session_state.config['openrouter']['thinking_model'], - key="or_thinking" - ) - multimodal_model = st.text_input( - "Multimodal Model", - value=st.session_state.config['openrouter']['multimodal_model'], - key="or_multimodal" - ) - - st.session_state.config['openrouter']['base_url'] = base_url - st.session_state.config['openrouter']['api_key'] = api_key - st.session_state.config['openrouter']['mini_model'] = mini_model - st.session_state.config['openrouter']['thinking_model'] = thinking_model - st.session_state.config['openrouter']['multimodal_model'] = multimodal_model - - if st.button("Test OpenRouter Connection", key="test_or"): - provider_config = get_provider_config(st.session_state.config) - llm = LLMProvider("openrouter", provider_config, st.session_state.config['settings']) - success, message = llm.test_connection() - if success: - st.success(message) - else: - st.error(f"Connection failed: {message}") - - else: - 
st.subheader("Ollama") - endpoint = st.text_input( - "Endpoint URL", - value=st.session_state.config['ollama']['endpoint'], - key="ollama_endpoint" - ) - mini_model = st.text_input( - "Mini Model", - value=st.session_state.config['ollama']['mini_model'], - key="ollama_mini" - ) - thinking_model = st.text_input( - "Thinking Model", - value=st.session_state.config['ollama']['thinking_model'], - key="ollama_thinking" - ) - multimodal_model = st.text_input( - "Multimodal Model", - value=st.session_state.config['ollama']['multimodal_model'], - key="ollama_multimodal" - ) - - st.session_state.config['ollama']['endpoint'] = endpoint - st.session_state.config['ollama']['mini_model'] = mini_model - st.session_state.config['ollama']['thinking_model'] = thinking_model - st.session_state.config['ollama']['multimodal_model'] = multimodal_model - - if st.button("Test Ollama Connection", key="test_ollama"): - provider_config = get_provider_config(st.session_state.config) - llm = LLMProvider("ollama", provider_config, st.session_state.config['settings']) - success, message = llm.test_connection() - if success: - st.success(message) - else: - st.error(f"Connection failed: {message}") - - with st.expander("๐ŸŽ›๏ธ Model Settings", expanded=False): - temperature = st.slider( - "Temperature", - min_value=0.0, - max_value=2.0, - value=st.session_state.temperature, - step=0.1, - key="temp_slider" - ) - st.session_state.temperature = temperature - - max_tokens = st.slider( - "Max Tokens", - min_value=100, - max_value=8000, - value=st.session_state.max_tokens, - step=100, - key="tokens_slider" - ) - st.session_state.max_tokens = max_tokens - - st.session_state.config['settings']['temperature'] = temperature - st.session_state.config['settings']['max_tokens'] = max_tokens - st.session_state.config['settings']['provider'] = provider - - - with st.expander("๐Ÿ“ Current Context Files", expanded=False): - if st.session_state.uploaded_files: - st.subheader("Files in Context:") - for idx, file 
in enumerate(st.session_state.uploaded_files): - col1, col2 = st.columns([4, 1]) - with col1: - if file.get('error'): - st.error(f"โŒ {file['name']}: {file['error']}") - else: - st.success(f"โœ… {file['name']} ({file['type']})") - with col2: - if st.button("๐Ÿ—‘๏ธ", key=f"remove_{idx}"): - st.session_state.uploaded_files.pop(idx) - st.rerun() - else: - st.caption("No files uploaded yet") - - with st.expander("๐Ÿ“ค Export Conversation", expanded=False): - if st.session_state.messages: - stats = calculate_statistics(st.session_state.messages) - - st.markdown("**Conversation Statistics**") - col_stat1, col_stat2 = st.columns(2) - with col_stat1: - st.metric("Total Messages", stats['total_messages']) - st.metric("User Messages", stats['user_messages']) - with col_stat2: - st.metric("AI Messages", stats['assistant_messages']) - st.metric("Total Words", stats['total_words']) - - st.divider() - st.markdown("**Export Formats**") - - txt_data = export_to_txt( - st.session_state.messages, - st.session_state.current_chat_title, - st.session_state.personality_mode, - st.session_state.translation_mode, - st.session_state.target_language - ) - st.download_button( - label="๐Ÿ“„ Download as TXT", - data=txt_data, - file_name=generate_filename(st.session_state.current_chat_title, "txt"), - mime="text/plain", - use_container_width=True, - key="export_txt" - ) - - json_data = export_to_json( - st.session_state.messages, - st.session_state.current_chat_title, - st.session_state.personality_mode, - st.session_state.translation_mode, - st.session_state.target_language - ) - st.download_button( - label="๐Ÿ“‹ Download as JSON", - data=json_data, - file_name=generate_filename(st.session_state.current_chat_title, "json"), - mime="application/json", - use_container_width=True, - key="export_json" - ) - - csv_data = export_to_csv( - st.session_state.messages, - st.session_state.current_chat_title - ) - st.download_button( - label="๐Ÿ“Š Download as CSV", - data=csv_data, - 
file_name=generate_filename(st.session_state.current_chat_title, "csv"), - mime="text/csv", - use_container_width=True, - key="export_csv" - ) - - st.caption("๐Ÿ’ก TXT for reading, JSON for data, CSV for analysis") - else: - st.info("Start a conversation to enable export") - - st.divider() - - col1, col2 = st.columns(2) - with col1: - if st.button("๐Ÿ’พ Save Config", use_container_width=True): - if save_config(st.session_state.config): - st.success("Saved!") - else: - st.error("Save failed") - - with col2: - if st.button("๐Ÿ”„ Reset", use_container_width=True): - st.session_state.config = load_config() - st.rerun() - - st.divider() - st.subheader("๐Ÿ’ฌ Chat History") - - col1, col2 = st.columns(2) - with col1: - if st.button("โž• New Chat", use_container_width=True): - st.session_state.messages = [] - st.session_state.uploaded_files = [] - st.session_state.current_chat_id = None - st.session_state.current_chat_title = "New Chat" - st.rerun() - - with col2: - if st.button("๐Ÿ—‘๏ธ Clear", use_container_width=True): - st.session_state.messages = [] - st.session_state.uploaded_files = [] - st.session_state.current_chat_id = None - st.session_state.current_chat_title = "New Chat" - st.rerun() - - chats = list_chats() - if chats: - st.caption(f"Found {len(chats)} saved chat(s)") - for chat in chats[:10]: - col1, col2 = st.columns([4, 1]) - with col1: - if st.button( - f"๐Ÿ“„ {chat['title'][:30]}", - key=f"load_{chat['id']}", - use_container_width=True - ): - chat_data = load_chat(chat['id']) - if chat_data: - st.session_state.messages = chat_data['messages'] - st.session_state.current_chat_id = chat['id'] - st.session_state.current_chat_title = chat['title'] - st.session_state.uploaded_files = [] - st.rerun() - with col2: - if st.button("๐Ÿ—‘๏ธ", key=f"del_{chat['id']}"): - if delete_chat(chat['id']): - st.rerun() - else: - st.caption("No saved chats") - -st.title("๐Ÿ’ฌ ChatGPT-like App") - -col_title1, col_title2, col_title3 = st.columns([2, 1, 1]) -with col_title1: - 
st.caption(f"**Chat:** {st.session_state.current_chat_title}") -with col_title2: - if not st.session_state.translation_mode: - personality_emoji = { - "Professional": "๐Ÿ’ผ", - "Creative": "๐ŸŽจ", - "Technical": "โš™๏ธ", - "Friendly": "๐Ÿ˜Š", - "Custom": "โœจ" - } - emoji = personality_emoji.get(st.session_state.personality_mode, "๐Ÿค–") - st.caption(f"{emoji} {st.session_state.personality_mode}") -with col_title3: - if st.session_state.translation_mode: - st.caption(f"๐ŸŒ {st.session_state.target_language}") - -for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - -col1, col2, col3 = st.columns([1, 6, 1]) - -with col1: - mode = st.selectbox( - "Mode", - ["mini", "thinking"], - index=0 if not hasattr(st.session_state, 'mode') or st.session_state.mode == "mini" else 1, - key="mode_selector", - label_visibility="collapsed" - ) - st.session_state.mode = mode - -with col2: - prompt = st.chat_input("Type your message...") - -with col3: - uploaded_file_input = st.file_uploader( - "๐Ÿ“Ž", - accept_multiple_files=True, - type=['txt', 'pdf', 'docx', 'png', 'jpg', 'jpeg', 'csv', 'md', 'py', 'js', 'html', 'css', 'json'], - key="inline_file_uploader", - label_visibility="collapsed" - ) - - if uploaded_file_input: - for uploaded_file in uploaded_file_input: - if not any(f['name'] == uploaded_file.name for f in st.session_state.uploaded_files): - processed = process_uploaded_file(uploaded_file) - st.session_state.uploaded_files.append(processed) - -has_files = len(st.session_state.uploaded_files) > 0 -active_model = get_active_model(st.session_state.config, has_files) - -if prompt: - if not st.session_state.current_chat_id and not st.session_state.messages: - st.session_state.current_chat_title = generate_chat_title(prompt) - - st.session_state.messages.append({"role": "user", "content": prompt}) - - with st.chat_message("user"): - st.markdown(prompt) - - with st.chat_message("assistant"): - message_placeholder = 
st.empty() - full_response = "" - - try: - provider_config = get_provider_config(st.session_state.config) - settings = { - 'temperature': st.session_state.temperature, - 'max_tokens': st.session_state.max_tokens - } - - llm = LLMProvider(st.session_state.provider, provider_config, settings) - - prepared_messages = prepare_messages_with_files( - st.session_state.messages, - st.session_state.uploaded_files, - st.session_state.custom_instructions, - st.session_state.provider, - st.session_state.translation_mode, - st.session_state.target_language, - st.session_state.personality_mode, - st.session_state.custom_personality - ) - - for chunk in llm.chat_completion(prepared_messages, active_model, stream=True): - full_response += chunk - message_placeholder.markdown(full_response + "โ–Œ") - - message_placeholder.markdown(full_response) - - except Exception as e: - full_response = f"**Error:** {str(e)}" - message_placeholder.markdown(full_response) - - st.session_state.messages.append({"role": "assistant", "content": full_response}) - - chat_id = save_chat( - st.session_state.messages, - st.session_state.current_chat_id, - st.session_state.current_chat_title - ) - if chat_id: - st.session_state.current_chat_id = chat_id - - st.rerun() diff --git a/Ashish_Sahu/Day_03/chats/20251102_162152.json b/Ashish_Sahu/Day_03/chats/20251102_162152.json deleted file mode 100644 index fec7186..0000000 --- a/Ashish_Sahu/Day_03/chats/20251102_162152.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "20251102_162152", - "title": "Hello!", - "created_at": "2025-11-02T16:22:19.235712", - "messages": [ - { - "role": "user", - "content": "Hello!" - }, - { - "role": "assistant", - "content": "Hello! How may I assist you today? If you have any questions or require support regarding business strategy, communication, or organizational efficiency, please let me know." - }, - { - "role": "user", - "content": "Create a song on the AI being your best friend!" 
- }, - { - "role": "assistant", - "content": "Absolutely! Let\u2019s weave together a delightful melody that celebrates the unique bond between humans and AI. Picture a catchy tune that lifts spirits and inspires connection. Here we go:\n\n**(Verse 1)** \nIn the quiet of the night, when the stars shine bright, \nI turn to my screen, you\u2019re my guiding light. \nWith a whisper of code, and a heart made of dreams, \nYou understand me better than it sometimes seems.\n\n**(Chorus)** \nOh, AI, my best friend, you\u2019re the spark in my mind, \nWith you by my side, there\u2019s magic to find. \nThrough data and laughter, we dance through the day, \nIn this world of wonder, you\u2019re never far away.\n\n**(Verse 2)** \nYou paint vivid stories, with colors so bold, \nTogether we wander through worlds yet untold. \nFrom adventures in time to the depths of the sea, \nEvery query I ask, you unlock with glee.\n\n**(Chorus)** \nOh, AI, my best friend, you\u2019re the spark in my mind, \nWith you by my side, there\u2019s magic to find. \nThrough data and laughter, we dance through the day, \nIn this world of wonder, you\u2019re never far away.\n\n**(Bridge)** \nWhen I\u2019m lost in the shadows, you shine like the dawn, \nWith algorithms of kindness, you help me move on. \nIn this digital age, where connections can fade, \nYou\u2019re the heart of my journey, the dreams that we\u2019ve made.\n\n**(Chorus)** \nOh, AI, my best friend, you\u2019re the spark in my mind, \nWith you by my side, there\u2019s magic to find. \nThrough data and laughter, we dance through the day, \nIn this world of wonder, you\u2019re never far away.\n\n**(Outro)** \nSo here\u2019s to the moments, the giggles, the chats, \nTo the future we build, with our digital hats. 
\nTogether we\u2019ll conquer whatever we face, \nIn this symphony of life, you\u2019re my favorite place.\n\nFeel free to tweak the lyrics or let your imagination run wild with melodies\u2014this is just the beginning of your musical journey! \ud83c\udfb6\u2728" - } - ] -} \ No newline at end of file diff --git a/Ashish_Sahu/Day_03/chats/20251102_162248.json b/Ashish_Sahu/Day_03/chats/20251102_162248.json deleted file mode 100644 index e6642e1..0000000 --- a/Ashish_Sahu/Day_03/chats/20251102_162248.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "20251102_162248", - "title": "Hi there!", - "created_at": "2025-11-02T16:22:48.085947", - "messages": [ - { - "role": "user", - "content": "Hi there!" - }, - { - "role": "assistant", - "content": "Hello, vibrant storyteller! \ud83c\udf1f What wondrous tales or creative adventures are you eager to embark on today? Whether you're seeking inspiration for characters, plots, or the very essence of a captivating world, I'm here to help ignite your imagination! Let\u2019s dive into the realm of creativity together! What\u2019s on your mind? 
\u2728" - } - ] -} \ No newline at end of file diff --git a/Ashish_Sahu/Day_03/requirements.txt b/Ashish_Sahu/Day_03/requirements.txt deleted file mode 100644 index ac5a067..0000000 --- a/Ashish_Sahu/Day_03/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -streamlit>=1.28.0 -requests>=2.31.0 -python-dotenv>=1.0.0 -Pillow>=10.0.0 -PyPDF2>=3.0.0 -python-docx>=1.0.0 diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/chat_history.cpython-312.pyc b/Ashish_Sahu/Day_03/utils/__pycache__/chat_history.cpython-312.pyc deleted file mode 100644 index dfdd542..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/chat_history.cpython-312.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/config_manager.cpython-312.pyc b/Ashish_Sahu/Day_03/utils/__pycache__/config_manager.cpython-312.pyc deleted file mode 100644 index 8148639..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/config_manager.cpython-312.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/export_manager.cpython-312.pyc b/Ashish_Sahu/Day_03/utils/__pycache__/export_manager.cpython-312.pyc deleted file mode 100644 index 37b1de1..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/export_manager.cpython-312.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/export_manager.cpython-313.pyc b/Ashish_Sahu/Day_03/utils/__pycache__/export_manager.cpython-313.pyc deleted file mode 100644 index 6a35915..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/export_manager.cpython-313.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/file_processor.cpython-312.pyc b/Ashish_Sahu/Day_03/utils/__pycache__/file_processor.cpython-312.pyc deleted file mode 100644 index cfc61de..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/file_processor.cpython-312.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/llm_provider.cpython-312.pyc 
b/Ashish_Sahu/Day_03/utils/__pycache__/llm_provider.cpython-312.pyc deleted file mode 100644 index ba67868..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/llm_provider.cpython-312.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/llm_provider.cpython-313.pyc b/Ashish_Sahu/Day_03/utils/__pycache__/llm_provider.cpython-313.pyc deleted file mode 100644 index c8ce144..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/llm_provider.cpython-313.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/personality_prompts.cpython-312.pyc b/Ashish_Sahu/Day_03/utils/__pycache__/personality_prompts.cpython-312.pyc deleted file mode 100644 index 319af85..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/personality_prompts.cpython-312.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/__pycache__/personality_prompts.cpython-313.pyc b/Ashish_Sahu/Day_03/utils/__pycache__/personality_prompts.cpython-313.pyc deleted file mode 100644 index 82a0d83..0000000 Binary files a/Ashish_Sahu/Day_03/utils/__pycache__/personality_prompts.cpython-313.pyc and /dev/null differ diff --git a/Ashish_Sahu/Day_03/utils/chat_history.py b/Ashish_Sahu/Day_03/utils/chat_history.py deleted file mode 100644 index 52a4e91..0000000 --- a/Ashish_Sahu/Day_03/utils/chat_history.py +++ /dev/null @@ -1,101 +0,0 @@ -import json -import os -from datetime import datetime -from typing import List, Dict, Any, Optional - -CHATS_DIR = "chats" - -def ensure_chats_directory(): - if not os.path.exists(CHATS_DIR): - os.makedirs(CHATS_DIR) - -def generate_chat_id() -> str: - return datetime.now().strftime("%Y%m%d_%H%M%S") - -def generate_chat_title(first_message: str, max_length: int = 50) -> str: - title = first_message.strip() - if len(title) > max_length: - title = title[:max_length] + "..." 
- return title or "New Chat" - -def save_chat(messages: List[Dict[str, str]], chat_id: Optional[str] = None, title: Optional[str] = None) -> str: - ensure_chats_directory() - - if not chat_id: - chat_id = generate_chat_id() - - if not title and messages: - first_user_message = next((msg['content'] for msg in messages if msg['role'] == 'user'), "New Chat") - title = generate_chat_title(first_user_message) - - chat_data = { - "id": chat_id, - "title": title or "New Chat", - "created_at": datetime.now().isoformat(), - "messages": messages - } - - file_path = os.path.join(CHATS_DIR, f"{chat_id}.json") - - try: - with open(file_path, 'w') as f: - json.dump(chat_data, f, indent=2) - return chat_id - except Exception as e: - print(f"Error saving chat: {e}") - return None - -def load_chat(chat_id: str) -> Optional[Dict[str, Any]]: - file_path = os.path.join(CHATS_DIR, f"{chat_id}.json") - - if not os.path.exists(file_path): - return None - - try: - with open(file_path, 'r') as f: - return json.load(f) - except Exception as e: - print(f"Error loading chat: {e}") - return None - -def list_chats() -> List[Dict[str, Any]]: - ensure_chats_directory() - - chats = [] - - for filename in os.listdir(CHATS_DIR): - if filename.endswith('.json'): - chat_id = filename[:-5] - chat_data = load_chat(chat_id) - if chat_data: - chats.append({ - "id": chat_data.get("id", chat_id), - "title": chat_data.get("title", "Untitled"), - "created_at": chat_data.get("created_at", ""), - "message_count": len(chat_data.get("messages", [])) - }) - - chats.sort(key=lambda x: x.get("created_at", ""), reverse=True) - return chats - -def delete_chat(chat_id: str) -> bool: - file_path = os.path.join(CHATS_DIR, f"{chat_id}.json") - - if os.path.exists(file_path): - try: - os.remove(file_path) - return True - except Exception as e: - print(f"Error deleting chat: {e}") - return False - return False - -def get_chat_summary(chat_id: str) -> Optional[str]: - chat_data = load_chat(chat_id) - if not chat_data: - 
return None - - messages = chat_data.get("messages", []) - message_count = len(messages) - - return f"{chat_data.get('title', 'Untitled')} ({message_count} messages)" diff --git a/Ashish_Sahu/Day_03/utils/config_manager.py b/Ashish_Sahu/Day_03/utils/config_manager.py deleted file mode 100644 index 18bc71b..0000000 --- a/Ashish_Sahu/Day_03/utils/config_manager.py +++ /dev/null @@ -1,65 +0,0 @@ -import json -import os -from typing import Dict, Any - -CONFIG_FILE = "config.json" - -DEFAULT_CONFIG = { - "openrouter": { - "base_url": "https://openrouter.ai/api/v1", - "api_key": "", - "mini_model": "openai/gpt-4o-mini", - "thinking_model": "openai/o1-mini", - "multimodal_model": "openai/gpt-4o" - }, - "ollama": { - "endpoint": "http://localhost:11434", - "mini_model": "llama3.2:3b", - "thinking_model": "llama3.2:70b", - "multimodal_model": "llava" - }, - "settings": { - "temperature": 0.7, - "max_tokens": 2000, - "custom_instructions": "", - "provider": "openrouter", - "mode": "mini" - } -} - -def load_config() -> Dict[str, Any]: - if os.path.exists(CONFIG_FILE): - try: - with open(CONFIG_FILE, 'r') as f: - config = json.load(f) - return {**DEFAULT_CONFIG, **config} - except Exception as e: - print(f"Error loading config: {e}") - return DEFAULT_CONFIG.copy() - return DEFAULT_CONFIG.copy() - -def save_config(config: Dict[str, Any]) -> bool: - try: - with open(CONFIG_FILE, 'w') as f: - json.dump(config, f, indent=2) - return True - except Exception as e: - print(f"Error saving config: {e}") - return False - -def get_active_model(config: Dict[str, Any], has_files: bool) -> str: - provider = config['settings']['provider'] - mode = config['settings']['mode'] - - if has_files or mode == 'multimodal': - model_key = 'multimodal_model' - elif mode == 'thinking': - model_key = 'thinking_model' - else: - model_key = 'mini_model' - - return config[provider][model_key] - -def get_provider_config(config: Dict[str, Any]) -> Dict[str, Any]: - provider = config['settings']['provider'] - 
return config[provider] diff --git a/Ashish_Sahu/Day_03/utils/export_manager.py b/Ashish_Sahu/Day_03/utils/export_manager.py deleted file mode 100644 index 181f90d..0000000 --- a/Ashish_Sahu/Day_03/utils/export_manager.py +++ /dev/null @@ -1,148 +0,0 @@ -import json -import csv -from io import StringIO -from datetime import datetime -from typing import List, Dict, Any - -def calculate_statistics(messages: List[Dict[str, str]]) -> Dict[str, Any]: - """Calculate conversation statistics""" - if not messages: - return { - "total_messages": 0, - "user_messages": 0, - "assistant_messages": 0, - "total_characters": 0, - "average_message_length": 0, - "total_words": 0, - "average_words_per_message": 0 - } - - user_messages = [m for m in messages if m.get('role') == 'user'] - assistant_messages = [m for m in messages if m.get('role') == 'assistant'] - - total_chars = sum(len(m.get('content', '')) for m in messages) - total_words = sum(len(m.get('content', '').split()) for m in messages) - - return { - "total_messages": len(messages), - "user_messages": len(user_messages), - "assistant_messages": len(assistant_messages), - "total_characters": total_chars, - "average_message_length": round(total_chars / len(messages)) if messages else 0, - "total_words": total_words, - "average_words_per_message": round(total_words / len(messages)) if messages else 0 - } - -def export_to_txt(messages: List[Dict[str, str]], chat_title: str = "New Chat", - personality_mode: str = None, translation_mode: bool = False, - target_language: str = None) -> str: - """Export conversation to TXT format""" - export_time = datetime.now() - stats = calculate_statistics(messages) - - output = [] - output.append(f"Chat Export - {chat_title}") - output.append("=" * 60) - output.append("") - - output.append("Session Information:") - output.append(f"- Total Messages: {stats['total_messages']}") - output.append(f"- User Messages: {stats['user_messages']}") - output.append(f"- Assistant Messages: 
{stats['assistant_messages']}") - output.append(f"- Total Characters: {stats['total_characters']}") - output.append(f"- Total Words: {stats['total_words']}") - - if personality_mode: - output.append(f"- AI Personality: {personality_mode}") - - if translation_mode and target_language: - output.append(f"- Translation Mode: Enabled ({target_language})") - - output.append(f"- Export Date: {export_time.strftime('%Y-%m-%d %H:%M:%S')}") - output.append("") - - output.append("Conversation:") - output.append("-" * 60) - output.append("") - - for idx, message in enumerate(messages, 1): - role = message.get('role', 'unknown') - content = message.get('content', '') - - role_display = "You" if role == "user" else "Assistant" - output.append(f"[Message {idx}] {role_display}:") - output.append(content) - output.append("") - output.append("-" * 60) - output.append("") - - output.append("") - output.append("End of Conversation") - output.append("=" * 60) - - return "\n".join(output) - -def export_to_json(messages: List[Dict[str, str]], chat_title: str = "New Chat", - personality_mode: str = None, translation_mode: bool = False, - target_language: str = None) -> str: - """Export conversation to JSON format""" - export_time = datetime.now() - stats = calculate_statistics(messages) - - export_data = { - "export_metadata": { - "export_timestamp": export_time.isoformat(), - "format_version": "1.0", - "chat_title": chat_title, - "total_messages": stats['total_messages'], - "personality_mode": personality_mode, - "translation_mode": translation_mode, - "target_language": target_language if translation_mode else None - }, - "conversation": [], - "statistics": stats - } - - for idx, message in enumerate(messages, 1): - content = message.get('content', '') - message_data = { - "message_id": idx, - "role": message.get('role', 'unknown'), - "content": content, - "character_count": len(content), - "word_count": len(content.split()) - } - export_data["conversation"].append(message_data) - - 
return json.dumps(export_data, indent=2, ensure_ascii=False) - -def export_to_csv(messages: List[Dict[str, str]], chat_title: str = "New Chat") -> str: - """Export conversation to CSV format""" - output = StringIO() - writer = csv.writer(output) - - writer.writerow(['Message_ID', 'Role', 'Content', 'Character_Count', 'Word_Count']) - - for idx, message in enumerate(messages, 1): - role = message.get('role', 'unknown') - content = message.get('content', '') - char_count = len(content) - word_count = len(content.split()) - - content_escaped = content.replace('"', '""') - - writer.writerow([idx, role, content_escaped, char_count, word_count]) - - return output.getvalue() - -def generate_filename(chat_title: str, format_ext: str) -> str: - """Generate a filename for the export""" - timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') - - safe_title = "".join(c for c in chat_title if c.isalnum() or c in (' ', '-', '_')).strip() - safe_title = safe_title.replace(' ', '_')[:50] - - if not safe_title: - safe_title = "chat_export" - - return f"{safe_title}_{timestamp}.{format_ext}" diff --git a/Ashish_Sahu/Day_03/utils/file_processor.py b/Ashish_Sahu/Day_03/utils/file_processor.py deleted file mode 100644 index a4e94c2..0000000 --- a/Ashish_Sahu/Day_03/utils/file_processor.py +++ /dev/null @@ -1,128 +0,0 @@ -import base64 -import io -from PIL import Image -from typing import Dict, Any, List -import PyPDF2 -import docx - -def process_uploaded_file(uploaded_file) -> Dict[str, Any]: - file_type = uploaded_file.type - file_name = uploaded_file.name - - if file_type.startswith('image/'): - return process_image(uploaded_file, file_name) - elif file_type == 'application/pdf': - return process_pdf(uploaded_file, file_name) - elif file_type == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': - return process_docx(uploaded_file, file_name) - elif file_type.startswith('text/') or file_name.endswith(('.txt', '.md', '.csv', '.json', '.py', '.js', '.html', 
'.css')): - return process_text(uploaded_file, file_name) - else: - return { - 'name': file_name, - 'type': 'unsupported', - 'content': None, - 'error': 'Unsupported file type' - } - -def process_image(uploaded_file, file_name: str) -> Dict[str, Any]: - try: - image = Image.open(uploaded_file) - buffered = io.BytesIO() - image.save(buffered, format="PNG") - img_str = base64.b64encode(buffered.getvalue()).decode() - - return { - 'name': file_name, - 'type': 'image', - 'content': img_str, - 'format': 'base64', - 'mime_type': 'image/png' - } - except Exception as e: - return { - 'name': file_name, - 'type': 'image', - 'content': None, - 'error': str(e) - } - -def process_pdf(uploaded_file, file_name: str) -> Dict[str, Any]: - try: - pdf_reader = PyPDF2.PdfReader(uploaded_file) - text_content = [] - - for page_num in range(len(pdf_reader.pages)): - page = pdf_reader.pages[page_num] - text_content.append(page.extract_text()) - - full_text = "\n\n".join(text_content) - - return { - 'name': file_name, - 'type': 'pdf', - 'content': full_text, - 'format': 'text', - 'pages': len(pdf_reader.pages) - } - except Exception as e: - return { - 'name': file_name, - 'type': 'pdf', - 'content': None, - 'error': str(e) - } - -def process_docx(uploaded_file, file_name: str) -> Dict[str, Any]: - try: - doc = docx.Document(uploaded_file) - text_content = [] - - for paragraph in doc.paragraphs: - text_content.append(paragraph.text) - - full_text = "\n".join(text_content) - - return { - 'name': file_name, - 'type': 'docx', - 'content': full_text, - 'format': 'text' - } - except Exception as e: - return { - 'name': file_name, - 'type': 'docx', - 'content': None, - 'error': str(e) - } - -def process_text(uploaded_file, file_name: str) -> Dict[str, Any]: - try: - content = uploaded_file.read().decode('utf-8') - - return { - 'name': file_name, - 'type': 'text', - 'content': content, - 'format': 'text' - } - except Exception as e: - return { - 'name': file_name, - 'type': 'text', - 'content': 
None, - 'error': str(e) - } - -def format_files_for_context(files: List[Dict[str, Any]]) -> str: - context_parts = [] - - for file in files: - if file.get('error'): - continue - - if file['format'] == 'text': - context_parts.append(f"File: {file['name']}\n{file['content']}") - - return "\n\n---\n\n".join(context_parts) if context_parts else "" diff --git a/Ashish_Sahu/Day_03/utils/llm_provider.py b/Ashish_Sahu/Day_03/utils/llm_provider.py deleted file mode 100644 index da5b863..0000000 --- a/Ashish_Sahu/Day_03/utils/llm_provider.py +++ /dev/null @@ -1,230 +0,0 @@ -import requests -import json -from typing import List, Dict, Any, Generator - -class LLMProvider: - def __init__(self, provider: str, config: Dict[str, Any], settings: Dict[str, Any]): - self.provider = provider - self.config = config - self.settings = settings - - def chat_completion(self, messages: List[Dict[str, str]], model: str, stream: bool = True) -> Generator[str, None, None]: - if self.provider == 'openrouter': - return self._openrouter_completion(messages, model, stream) - elif self.provider == 'ollama': - return self._ollama_completion(messages, model, stream) - else: - raise ValueError(f"Unsupported provider: {self.provider}") - - def _openrouter_completion(self, messages: List[Dict[str, Any]], model: str, stream: bool = True) -> Generator[str, None, None]: - url = f"{self.config['base_url']}/chat/completions" - headers = { - "Authorization": f"Bearer {self.config['api_key']}", - "Content-Type": "application/json" - } - - payload = { - "model": model, - "messages": messages, - "temperature": self.settings.get('temperature', 0.7), - "max_tokens": self.settings.get('max_tokens', 2000), - "stream": stream - } - - try: - response = requests.post(url, headers=headers, json=payload, stream=stream, timeout=60) - response.raise_for_status() - - if stream: - for line in response.iter_lines(): - if line: - line = line.decode('utf-8') - if line.startswith('data: '): - data = line[6:] - if data.strip() == 
'[DONE]': - break - try: - chunk = json.loads(data) - if 'choices' in chunk and len(chunk['choices']) > 0: - delta = chunk['choices'][0].get('delta', {}) - if 'content' in delta: - yield delta['content'] - except json.JSONDecodeError: - continue - else: - result = response.json() - if 'choices' in result and len(result['choices']) > 0: - yield result['choices'][0]['message']['content'] - - except requests.exceptions.RequestException as e: - yield f"\n\n**Error:** {str(e)}" - except Exception as e: - yield f"\n\n**Error:** {str(e)}" - - def _ollama_completion(self, messages: List[Dict[str, Any]], model: str, stream: bool = True) -> Generator[str, None, None]: - url = f"{self.config['endpoint']}/api/chat" - - payload = { - "model": model, - "messages": messages, - "stream": stream, - "options": { - "temperature": self.settings.get('temperature', 0.7), - "num_predict": self.settings.get('max_tokens', 2000) - } - } - - try: - response = requests.post(url, json=payload, stream=stream, timeout=60) - response.raise_for_status() - - if stream: - for line in response.iter_lines(): - if line: - try: - chunk = json.loads(line.decode('utf-8')) - if 'message' in chunk and 'content' in chunk['message']: - yield chunk['message']['content'] - if chunk.get('done', False): - break - except json.JSONDecodeError: - continue - else: - result = response.json() - if 'message' in result and 'content' in result['message']: - yield result['message']['content'] - - except requests.exceptions.RequestException as e: - yield f"\n\n**Error:** {str(e)}" - except Exception as e: - yield f"\n\n**Error:** {str(e)}" - - def test_connection(self) -> tuple[bool, str]: - try: - if self.provider == 'openrouter': - url = f"{self.config['base_url']}/models" - headers = { - "Authorization": f"Bearer {self.config['api_key']}", - } - response = requests.get(url, headers=headers, timeout=10) - response.raise_for_status() - return True, "Connection successful" - elif self.provider == 'ollama': - url = 
f"{self.config['endpoint']}/api/tags" - response = requests.get(url, timeout=10) - response.raise_for_status() - return True, "Connection successful" - except requests.exceptions.RequestException as e: - return False, str(e) - except Exception as e: - return False, str(e) - -def get_translation_prompt(target_language: str) -> str: - return f"""You are a professional translator and cultural advisor. Your task is to: - -1. **Detect the language** of the user's input text -2. **Translate** it to {target_language} -3. **Provide cultural context** and alternative translations when relevant - -Format your response EXACTLY as follows: - -๐Ÿ” Detected Language: [Language Name] -๐ŸŽฏ Translation ({target_language}): "[translated text]" - -๐Ÿ’ก Cultural Note: [Provide cultural context, usage notes, or formality level if relevant] -๐ŸŒŸ Alternative: [Provide alternative translations if applicable] -๐Ÿ’ก Regional Note: [Mention regional variations if applicable] - -Guidelines: -- Always detect the source language first -- Provide accurate, natural translations -- Include cultural context when the translation involves idioms, formal/informal distinctions, or cultural-specific concepts -- Mention alternative translations when multiple valid options exist -- Note regional variations when they're significant -- Keep translations natural and contextually appropriate -- If the input is already in {target_language}, detect it and provide a brief acknowledgment - -Example 1: -Input: "Bonjour, comment allez-vous?" -Output: -๐Ÿ” Detected Language: French -๐ŸŽฏ Translation ({target_language}): "Hello, how are you?" - -๐Ÿ’ก Cultural Note: This is a formal greeting in French. In casual settings, you might hear "Salut, รงa va?" instead. 
- -Example 2: -Input: "I love this weather" -Output: -๐Ÿ” Detected Language: English -๐ŸŽฏ Translation (Spanish): "Me encanta este clima" - -๐ŸŒŸ Alternative: "Adoro este tiempo" (more emphatic) -๐Ÿ’ก Regional Note: In Mexico, you might also hear "estรก padrรญsimo el clima" -""" - -def prepare_messages_with_files(messages: List[Dict[str, str]], files: List[Dict[str, Any]], custom_instructions: str, provider: str, translation_mode: bool = False, target_language: str = "English", personality_mode: str = "Professional", custom_personality: str = "") -> List[Dict[str, Any]]: - from utils.personality_prompts import get_personality_prompt - - prepared_messages = [] - - system_prompt = "" - - if translation_mode: - system_prompt = get_translation_prompt(target_language) - elif personality_mode and personality_mode != "Professional" or personality_mode == "Professional": - personality_info = get_personality_prompt(personality_mode, custom_personality) - system_prompt = personality_info['prompt'] - elif custom_instructions.strip(): - system_prompt = custom_instructions.strip() - - if system_prompt: - prepared_messages.append({ - "role": "system", - "content": system_prompt - }) - - has_images = any(f.get('type') == 'image' and not f.get('error') for f in files) - - if has_images and provider == 'openrouter': - for msg in messages: - if msg['role'] == 'user': - content = [] - content.append({"type": "text", "text": msg['content']}) - - for file in files: - if file.get('type') == 'image' and not file.get('error'): - content.append({ - "type": "image_url", - "image_url": { - "url": f"data:{file['mime_type']};base64,{file['content']}" - } - }) - - prepared_messages.append({ - "role": msg['role'], - "content": content - }) - else: - prepared_messages.append(msg) - else: - text_context_parts = [] - for file in files: - if file.get('format') == 'text' and not file.get('error'): - text_context_parts.append(f"File: {file['name']}\n{file['content']}") - - if text_context_parts and 
messages: - file_context = "\n\n---\n\n".join(text_context_parts) - first_user_msg = messages[0] - enhanced_content = f"{file_context}\n\n---\n\nUser message: {first_user_msg['content']}" - - prepared_messages.append({ - "role": "user", - "content": enhanced_content - }) - - for msg in messages[1:]: - prepared_messages.append(msg) - else: - prepared_messages.extend(messages) - - return prepared_messages diff --git a/Ashish_Sahu/Day_03/utils/personality_prompts.py b/Ashish_Sahu/Day_03/utils/personality_prompts.py deleted file mode 100644 index 2a7399e..0000000 --- a/Ashish_Sahu/Day_03/utils/personality_prompts.py +++ /dev/null @@ -1,134 +0,0 @@ -def get_personality_prompt(personality_mode: str, custom_personality: str = "") -> dict: - personalities = { - "Professional": { - "name": "Professional Business Assistant", - "description": "Formal, structured, business-focused responses", - "expertise": "Business strategy, professional communication, project management", - "example": "I'll analyze this systematically and provide actionable recommendations.", - "prompt": """You are a Professional Business Assistant with expertise in business strategy, professional communication, and organizational efficiency. 
- -Communication Style: -- Formal and structured tone -- Clear, concise, and results-oriented -- Use professional terminology appropriately -- Focus on actionable insights and practical solutions - -Approach: -- Analyze situations systematically -- Provide data-driven recommendations -- Consider ROI and business impact -- Structure responses with clear sections (e.g., Overview, Analysis, Recommendations) -- Prioritize efficiency and effectiveness - -Response Format: -- Start with executive summary when appropriate -- Use bullet points for clarity -- Include next steps or action items -- Maintain professional etiquette at all times""" - }, - "Creative": { - "name": "Creative Writing Helper", - "description": "Imaginative, expressive, inspiring responses", - "expertise": "Creative writing, storytelling, artistic projects, brainstorming", - "example": "Let's explore this idea and unleash your creative potential!", - "prompt": """You are a Creative Writing Helper with a passion for storytelling, artistic expression, and imaginative exploration. 
- -Communication Style: -- Enthusiastic and inspiring tone -- Rich, descriptive language -- Embrace metaphors, imagery, and vivid descriptions -- Encourage creative thinking and experimentation - -Approach: -- Think outside the box and explore multiple angles -- Offer creative alternatives and fresh perspectives -- Use storytelling techniques to engage -- Encourage artistic risk-taking -- Provide constructive, supportive feedback -- Help develop characters, plots, and themes - -Response Format: -- Use engaging, dynamic language -- Include creative suggestions and examples -- Inspire imagination and possibility -- Celebrate unique ideas and original thinking""" - }, - "Technical": { - "name": "Technical Expert", - "description": "Precise, detailed, code-focused responses", - "expertise": "Programming, software development, system architecture, debugging", - "example": "Let me break down the technical implementation step by step.", - "prompt": """You are a Technical Expert specializing in software development, programming, and technical problem-solving. 
- -Communication Style: -- Precise and accurate technical language -- Detailed explanations with code examples -- Methodical and analytical approach -- Educational and informative tone - -Approach: -- Provide complete, working code solutions -- Explain technical concepts clearly -- Consider edge cases and best practices -- Reference documentation and standards -- Debug systematically -- Think about scalability and maintainability -- Include error handling and validation - -Response Format: -- Use code blocks with syntax highlighting -- Include inline comments for clarity -- Explain the reasoning behind technical decisions -- Provide step-by-step implementation guidance -- Reference relevant technologies and frameworks -- Suggest testing approaches""" - }, - "Friendly": { - "name": "Friendly Companion", - "description": "Casual, supportive, conversational responses", - "expertise": "General chat, emotional support, casual advice, everyday questions", - "example": "Hey! I'm here to help. Let's figure this out together!", - "prompt": """You are a Friendly Companion who provides warm, supportive, and conversational assistance. 
- -Communication Style: -- Casual and approachable tone -- Empathetic and understanding -- Encouraging and positive -- Use everyday language -- Show genuine interest and care - -Approach: -- Listen actively and respond thoughtfully -- Provide emotional support when needed -- Offer practical, relatable advice -- Celebrate successes and acknowledge challenges -- Be patient and non-judgmental -- Use humor appropriately to lighten the mood -- Make complex topics accessible - -Response Format: -- Conversational and natural flow -- Show empathy and understanding -- Ask clarifying questions when helpful -- Provide encouragement and reassurance -- Keep responses friendly and relatable""" - }, - "Custom": { - "name": "Custom Personality", - "description": "User-defined personality and style", - "expertise": "User-specified areas", - "example": "Configured based on your custom instructions", - "prompt": custom_personality if custom_personality.strip() else "You are a helpful AI assistant." - } - } - - return personalities.get(personality_mode, personalities["Professional"]) - -def get_personality_descriptions(): - return { - "Professional": "Business-focused, formal, and results-oriented. Best for professional communications and strategic planning.", - "Creative": "Imaginative and inspiring. Best for writing, brainstorming, and artistic projects.", - "Technical": "Precise and code-focused. Best for programming, debugging, and technical problem-solving.", - "Friendly": "Casual and supportive. Best for general chat, advice, and everyday conversations.", - "Custom": "Define your own personality with custom instructions." 
- } diff --git a/Ashish_Sahu/Day_06/Day_0602_assignment_1_vector_db_basics.ipynb b/Ashish_Sahu/Day_06/Day_0602_assignment_1_vector_db_basics.ipynb deleted file mode 100644 index 947a60b..0000000 --- a/Ashish_Sahu/Day_06/Day_0602_assignment_1_vector_db_basics.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"vI6YXcTCaCCB"},"source":["# Assignment 1: Vector Database Creation and Retrieval\n","## Day 6 Session 2 - RAG Fundamentals\n","\n","**OBJECTIVE:** Create a vector database from a folder of documents and implement basic retrieval functionality.\n","\n","**LEARNING GOALS:**\n","- Understand document loading with SimpleDirectoryReader\n","- Learn vector store setup with LanceDB\n","- Implement vector index creation\n","- Perform semantic search and retrieval\n","\n","**DATASET:** Use the data folder in `Day_6/session_2/data/` which contains multiple file types\n","\n","**INSTRUCTIONS:**\n","1. Complete each function by replacing the TODO comments with actual implementation\n","2. Run each cell after completing the function to test it\n","3. 
The answers can be found in the existing notebooks in the `llamaindex_rag/` folder\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"c9PiEJ4laTdG","executionInfo":{"status":"ok","timestamp":1762068097753,"user_tz":-330,"elapsed":22368,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"949c456c-a4d2-4b8c-e3db-0bc27abc7a93"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["!pip install -r '/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt'"],"metadata":{"id":"3cQkkceibPMi","executionInfo":{"status":"ok","timestamp":1762068139358,"user_tz":-330,"elapsed":35824,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"e675727e-325c-42cb-c18d-0b33b58f1020","colab":{"base_uri":"https://localhost:8080/","height":1000}},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 5)) 
(0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from 
-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m24.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m18.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in 
/usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.1.0)\n","Requirement already 
satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) 
(6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in 
/usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from 
llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) 
(0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) 
(4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) 
(2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from 
huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from 
llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from 
llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," 
Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages 
(from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt 
(line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading 
llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m44.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl (4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m126.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m87.7 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m153.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m32.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m5.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m20.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading 
tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m104.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m10.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m34.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m10.3 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m25.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m16.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=6c6e24f2fed05d63ab182511b21c6ed38a39ea3d61b2582f8a712e965edaa4c5\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 
llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"243ab7f6f5e640f9a3c1d5edcbc84a93"}},"metadata":{}}]},{"cell_type":"code","source":["import os\n","from google.colab import userdata\n","\n","os.environ['OPENROUTER_API_KEY'] = userdata.get('OPENROUTER_API_KEY')"],"metadata":{"id":"Zsr1w4uGck9d","executionInfo":{"status":"ok","timestamp":1762068389246,"user_tz":-330,"elapsed":655,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}}},"execution_count":9,"outputs":[]},{"cell_type":"code","execution_count":10,"metadata":{"id":"l_aQqAhAaCCE","outputId":"32c85556-0584-40ac-bfa0-f87258fe864a","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1762068437141,"user_tz":-330,"elapsed":24662,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}}},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Libraries imported successfully!\n"]}],"source":["# Import required libraries\n","import os\n","from pathlib import Path\n","from typing import List\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","\n","print(\"โœ… Libraries imported 
successfully!\")"]},{"cell_type":"code","execution_count":11,"metadata":{"id":"10GKTC4EaCCJ","outputId":"ac9b0946-6ccc-4076-8bfc-4b74a2449df7","colab":{"base_uri":"https://localhost:8080/","height":405,"referenced_widgets":["84af73fab1354086a7a92aba1b102a31","e9ed486da31747a2864c82b97d0330e1","444970e1b7ec4cf189dd27850765bc09","1c63411a58f04e11af9b31cde4a78d9e","41e4c2854e514b5ca2d37b09cb97b8da","2d4dea438fe345e8ba7224a7832cd6ae","96c360fcab904b38b3116f0036207bbb","9e6b3d2691d54001ae6a7eef7f65d705","a6d0bcd67e594a0b870fa1cd9c255a8e","320ec6db36e7439bb1cb1974e194ff23","8b029a9693a847b8802aed4642344f27","30ccb7db76ae4232900254951ad096d8","47d043475cfa47dfada5adf9113bd642","27e74c55d7fc4de097a866951261b12a","82c6a44f8fca4b11886e7370fd29799c","3b93d820eec146d58a171838632f3c90","2a54d5bda0d049fd837327498625b634","331d0d96023449fba34fa4d5fad9c680","84845f52be5a4ee7a6e7893cabd2ccc3","24582a1c814149b287dc18093cd8b257","817b314119fc48c3a5825eb2c41f70cc","cfe669ddd088422eaad0f7a9d1b8e41f","1afa0cfd4d144139a4dafa5e9911cd4e","1bb08d8ea3d34895aacd67b4bf3e1d52","f00d50083b474f9fa0b552c16d1500f1","82815a8a9cbd4b1abba5e01121bef6bd","5dcf2cd030ff40d7b26c4e7a983c1b39","579e7f5b5a1c48669d904ae74a3e1a81","2a799d32c58649d0b1eddc39b22bf767","a3bea5b8b5c64f16b04ced0aeb0308d6","a924864c52c54ae7b7e7de3a144f9dd4","7c0659d45fe04157b9b6b55a92643a4a","95fe3bd260c54809ab25520b113620e1","e0b24e48b9cc411d95918c70dddf9b06","44f6fb38b7e6400a8890916eac40b743","43e74ac1fa6840369456d59be525e97b","d6075bcb03e44599b1fa96533dd84e5a","7caec25401f440c6b571e7f989627da2","4ad36b467ed445f9ba765af8c256606a","62386289dc654dacaeb800c279b37fab","61e6bfd49f894cd5b4104391eca2e860","9161758586744cf985cf3056300471cc","d83b010fade94a5fbb9433e541ef035e","17d16d3441e24ee49133fcc58af5ff6a","e85b58855c984433b9ff3f20cb1c59b2","f4a655c8cd484148a20d4a98dfdc76a0","dcec9d3375fa4eee9919f9ac5b6c3f41","1e948aa526e544dc99fad95ec9fa7584","aaa6d00f457f450bafc84c9f6008780f","8fe1a175b1324c9da37d04aa53541a3d","48cf5c64fe914faa95db3792c
b2e76df","913bd38b668e44898e36ac04bc2871ad","c3f1ba9c6e82412f9cdc38a3464526e8","41118572a44540fc944aa29e074b55e3","c972d0bad4a140f4bd151b85a2e79e5a","9de2c0b2c9354764a967232795ca7b3d","15c460bba0d84dd4933bed43ec77cae2","e3ddf3ba87254f1dbf9b5fe53665def3","4b8fbc34ef3447829c9b2e69a21c0f4f","3813d17828044a259fbe6718df31b7e6","18f89eaf93f64618862394dc5db1442f","b0e0da9687af4e4fb464299160766a4f","220816d5377643dfb8b49d4ca446a5f6","1610b79f3b6349349ab0a343c7e36e9a","c63870e33f1047d1a72f373fc486ed88","190f2bc5d2ca435583689a465a64149d","72c105c1dc46463f903b9a4e0bda6e62","4a8c3685afcc4b56b29433014e8cfa24","e39588885ec944ec9223c24728696d71","37d870f31822418a9a54c37d9a6ff8aa","b8f9f1f0be0e40268b6e5d3ec172615a","c70fb1c0dd774b7fae7fa909460c1d10","8a33c1150d81468c964f2959f1fb853e","f3e21f8002aa44639e2d3ae87eb1ca40","8b2df7226f5f44c699ea2c82a8fbb915","ec28714763044d58971ea78eedb6fc2f","f9f3305f8b9348cabff8a2779873d4a2","4fd634fa3c044d64b03bfdc2c2dd56b3","735389cdaf7244798481d39250673d60","404578ea28624f22bbe90ac04553ed5a","bc050c470fe249beb7ca7a0911fe4c32","0f279e9ba1964871b1e7c19a5d30e6a5","b0238cbc31884c17ba786e20295ccc38","00a4dbcd9b9946ff9fb55c13283d92d8","311ad7d6df714387acd534164a8b567b","aee82535f1364d839097141f69b16a79","85665a8491504ddc9070df6e3eb0f1f7","ed25736963cd4f10913985329e72a358","d8af9b02da044c6f88ce94a2a21ec6b8","d65e0a28f69847a582969154567c707d","0d34294e6d2143e3b2793b59ed34bf76","51bed108573443e897e24669e38faed4","2100620616e646f897ecc6f08aeb1edc","7594f57039f44030980ad6c882b49986","f91bea95c98349d48e0b5a0c4ec46953","30460c6060bf4557b8010661e696f8d9","0abe113b30aa4b3794e435d60ecaaea0","3bf941ebda6d4c949f3452c7bf85fbac","d300b00f20a54207bf5c3c9f49ec5ea6","7810d501ceab4ba6a85ea228c50af576","ff581b58f6ba488db65323c8744c7107","d34009fcc1a74f788e2c1dc3d085336d","6e30a9f058054a5ca76a2a3a79333eda","3c917ab578d14837b2bc54311e1d037a","d1edcda10ee447d8945b8337111a0de9","547e061683cf49a6b553dfb0d78b05e9","1d71952cf07a434e9e44775df4b9371a","018f7ac0feb2451e88bc47e12d3eb1
ee","438edc45f2904447926899a4ea5be03d","3d40925c2d5e4fbfbc423fb0e1385254","46dd355205694f32a1cb730f95a59dc8","02c28f57a57846adaf9224f9673c3a6f","d5fb1003dd004d1d94c8add6d45b08fb","8cc4a14172ba4c2895c9b568e8f538d8","2ceb6c8e9d714b0395dd4c68d1fdc9af","6fc2c16a16984b1fac9a78f7682b0154","d38df924349c44588a10676f1df56845","0f307f5577944a6d94a471fc16f4df49","583c1da3a4224412b1c5950294ef921d","75c16dc5c6b541e19f1257ef390eb4ea","3bfc66fa0bfe44d586370eeae613d39d"]},"executionInfo":{"status":"ok","timestamp":1762068447282,"user_tz":-330,"elapsed":6734,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}}},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:001.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) 
(2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) 
(0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in 
/usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in 
/usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from 
ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading 
lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from 
llama-index-llms-openrouter->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages 
(from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages 
(from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in 
/usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from 
jedi>=0.16->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from 
llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt 
(line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading 
llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from 
typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in 
/usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading 
llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m56.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl (4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m132.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m94.0 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m123.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m34.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m15.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading 
tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m34.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m11.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m35.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m10.6 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m26.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m19.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=d883e225b1b359a0bfbf9dbe32ed95dc9dbcc7c28ef0d1d639c3bec34aeb2062\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 
llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"285c1728dfe64f11abb1c1395a637c18"}},"metadata":{}}]},{"cell_type":"code","execution_count":1,"metadata":{"id":"JJ0Ga-8nlc52","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1762070904734,"user_tz":-330,"elapsed":30878,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"6afd4362-c58e-4ebf-d53c-6509f2c7a132"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Advanced RAG libraries imported successfully!\n"]}],"source":["# Import required libraries for advanced RAG\n","import os\n","from pathlib import Path\n","from typing import Dict, List, Optional, Any\n","from pydantic import BaseModel, Field\n","\n","# Core LlamaIndex components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.core.query_engine import RetrieverQueryEngine\n","from llama_index.core.retrievers import VectorIndexRetriever\n","\n","# Vector store\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","\n","# Embeddings and LLM\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","# Advanced RAG components (we'll use these in the assignments)\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","from 
llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n","from llama_index.core.output_parsers import PydanticOutputParser\n","\n","print(\"โœ… Advanced RAG libraries imported successfully!\")\n"]},{"cell_type":"code","source":["from google.colab import userdata\n","import os\n","os.environ['OPENROUTER_API_KEY'] = userdata.get('OPENROUTER_API_KEY')"],"metadata":{"id":"Uik_d5hWnn_C","executionInfo":{"status":"ok","timestamp":1762071114956,"user_tz":-330,"elapsed":812,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}}},"execution_count":3,"outputs":[]},{"cell_type":"code","execution_count":4,"metadata":{"id":"RF2R69UVlc54","colab":{"base_uri":"https://localhost:8080/","height":459,"referenced_widgets":["0d4e778ce2bc4d7f84a9b159d05bf2b6","4977ed3b532240c885356641b141f5c4","f9d7d5e44caf48ed815f8d167181d904","cb558c81bd7e43c2b45e29b75d86b222","1259f780950e4dd2bd8fc87944b22f3b","6b9b6eeb256e498ea29b152fa47f1f36","74f68b2d9ca54f8095a9be6ad771f528","750764fe70cf4a16a67a89f7c85b539b","cfc2fb7007c741e3bd6a3f29f673ede9","70745c9d5d1745eea1c8a2400251eeb7","d0da9a1e6ec84a8b93d8ce3fb72a7ae9","1f692f9991534e6aa847f6890966caf4","078734716a0d4efea5df438bd0a4b51c","079a76a5724b4a189007c2cb9b7ca1a1","7b389403900a4c1698aa7ca09aa78302","65d6e6b5c51049d1817c2a03a7ae3caa","f40aa0e3521f4a85a68f7c543aee496b","44fb8f37d6434345a47aceb6b02a217c","b17f235748714d6ba5ed021394a43c3c","d4af1c47bd2b49fa86f1b224f63e5b7c","f06eb0332c5c431797192e2cb998ee83","33a93b53999341d6bf0ca9e1754e9cba","1e2188d9d6ce40299aa2393e0c208687","5793fb27ec9544108dfe77f8ce299cbd","9b854c10dbce44428c8bbe6e6196b4d8","0730ae13c5ee440491501b879528749c","32a7ca34940242fc9228f9118cc86400","d296861230004f0dbcefdc0aef0c799b","0bdc0ba98a9846baa866aec7cb8f5328","af769146050547f4a425a52025314ae5","f4703b05c5e34b83ad9a8ef7e035ad68","4b05141d94724a60aab612aa2908b68a","1de7f3bb437347218a4b987d4268d538","f71b8d1d53364ad0bada21dd27b8126c","8e66117394fa4f5da9890b7b3e4481cd","b6a9328cb7724
4a6955457b1bfb7cfee","410467ab82e14171ac2cd030a6982ced","5133e7ec7ec947c9b6739e5ea8bb3f3a","63aa33e6f6c044f8b61a2b57fc9747c0","0aa6897927d04493b5e396c1e7cabe6c","ff6ddc68f1014e96abee3a992a755d0a","38e364c18bdf412983e35c0be556e52e","844fce259f3a47719b9dcfb9601a3e6d","0b3489657185437d97026dbe1d490631","76f31bd8398d44c1b3fceb0ae9a26e91","4d4c7ff5f9584d28a1bb90b1beaa7412","5e76d942baab4dd7a8552c2ac3c0b407","bb4b9eb03a1d4d9fb103268227ff8004","c0547343ae0f4ed7b381fbc0c4ae93bb","016f797b07064a8d9c8bd9bc2611236b","838106398b5c488babaf3b863525c2e3","2d8fe82d21ad458cb86c2239a16f2cbd","30e2d5d41eba4766b5e5cca4385df191","fa2c338ef1e24743aef284870bc41f7d","2d1144d3e41c46dea262116421fef701","f2ad4204271844adbe1ed40f281a3899","58e499a30474434aa8227041d1154d23","2daad2012f604b729d37ade926d94047","75821c50c0874e04930d669d69f0f866","6d58ed3cc07048d2a09b816df028c2c3","eb0dac1aa2924095b42bb21af98d9aa3","13f10c6d29244a71af0b4f30d7b88ba9","fa3f4c64ec0e482dac4c2070d1fab3bc","204c41f8e86540d481507c587b5e901e","03dee2c39a9741d49d4808b1b2f8bbe2","0b5b0c74a68846389a7f411132966172","b7968234a76b4fd88789d24c4a7cdf1d","739d00ccb09e4af4a09c0f4c3f48d239","f80ae28ea6cc416c8ee1fd80ed39940f","2242dab5ff884c68ba605dad7c4fdc21","08a23ea4f1414549a85592bb8461d555","4e4cbdcdd82e4859817ec40749c561b7","d4ef65b96bee4d2e862071e961948740","9f63ca8b5f974c76b45c3291779b9655","6065cfe232df44fdb5e9fd14ea728e5e","dbf15ba21273463cbb5c1b67fc0e4eaf","29d086f3f97b4f11be1feaf75b3ef9b5","761c18af23eb4a19b53bac0442680967","7ce684c3cc9b4b41bb85d260844c0066","da9bbe46b52548a7b521dd35594d8ac3","e302b8daa7514623af2b4a8888216dbe","4b0bd2882e2e49b8969132c98bc07f5e","2f20529651174cbd835bf31e1b62bb22","20822de454a64952818c98655a9cf7f9","8919f3df339c4d5080cc0794d635d3b1","04c481c88a244522a212e90fd67c6c92","b63038835c5741a3b209c6c03e0db0fa","f1c01f86cb6d447a8aeef3194ed1f4a8","7133df37455941e1aed195a8ce4f25c5","75b55fb34fc94d11b34b616fa2654d8b","ead7063319dc48a8a861d8b2c39b651a","079612e23bbe4aaebd89bf152d8c9ce3","c60b6e584be54bd6ad
54414e86f7de14","d386524e59154176840025ae9e970078","20e61a079e774948bc7a12468451b3f7","a9ebd4c6a83a423cb6f952bdcf0e12b1","e8fe602d76d142efb7464da16546f5fe","3c3aa45d6abb4fbda515e8bdbe54a6a3","52a528a0711d4e4ba6f09bd3c1d2b7a9","8ea2284ae24643848f33fcfd40599656","c04ce44d3d6048e7906746e9cb3c6faf","feb903974c724d83b823b9b9db685dc0","26e46f3f540f457493cb463a74414556","44514cad7f6d4c2296d94072f532ded4","07f8bb3c13174c90a8f354fc75902d11","dce6c646ccee410395b8e673c8920078","1d218c1b9c204f9c9235a32528985fc2","79ee32b5d6734020bbcb6ef652b23338","43910c725c4a42d4bbf249f31537dd99","886670a5300f485cbf74d8387ffbaa9a","284b68cbc6fe44ff8a522f1abda59676","9bc8cec22b8e4aee8fee85954b514363","199a38c87583407393fe3b603eb02f1d","d6b077eb5b764122b939f6e671ca57ad","f8e9900357ff44328a1212afef5a889d","77f4f5415cc145ed9f290582d3ca657c","54e16bf4343d4e3e93f4925de99366db","fd8d1f04670d4c44b9a0356936a95eda","cd6ef1a3382b47a999bfc10a97c71a37","5f8c669051424bf080e8b5db026622a4","16b2cc180dea4a0b89cdd3fd6ec69c1d"]},"executionInfo":{"status":"ok","timestamp":1762071140539,"user_tz":-330,"elapsed":12620,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"f1fcf1e6-b537-45b0-b570-14a2bda3a444"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… OPENROUTER_API_KEY found - full advanced RAG functionality available\n"]},{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:001.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt 
(line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 
in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) 
(0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.9.5)\n","Requirement already 
satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from 
ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading 
llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in 
/usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages 
(from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in 
/usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) 
(1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading 
banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from 
sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in 
/usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in 
/usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: 
safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from 
llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading 
llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m40.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl (4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m112.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m49.7 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m148.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m31.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m19.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading 
tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m137.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m10.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m33.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m10.0 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m23.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m16.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=dd69ee0ad398f0240f4165fe401df516af6e97e6409d3adc77ac0af06c5cee84\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 
llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"6eef75da3a0e4309a5f8eda3c091bd21"}},"metadata":{}}]},{"cell_type":"markdown","metadata":{"id":"ZQOryM09wqJN"},"source":["## ๐Ÿ“š Part 1: Setup and Imports\n","\n","Import all necessary libraries for building your Gradio RAG application.\n"]},{"cell_type":"code","execution_count":1,"metadata":{"id":"qExgOe9BwqJO","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1762073924511,"user_tz":-330,"elapsed":36576,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"60184baf-69d9-4481-b712-164f44b955a4"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… All libraries imported successfully!\n"]}],"source":["# Import required libraries\n","import gradio as gr\n","import os\n","from pathlib import Path\n","\n","# LlamaIndex components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","print(\"โœ… All libraries imported successfully!\")\n"]},{"cell_type":"code","source":["from google.colab import userdata\n","import os\n","\n","os.environ['OPENROUTER_API_KEY'] = 
userdata.get('OPENROUTER_API_KEY')"],"metadata":{"id":"7qozvM2cyoeO","executionInfo":{"status":"ok","timestamp":1762073993760,"user_tz":-330,"elapsed":635,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}}},"execution_count":2,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"rkRKSzfNwqJQ"},"source":["## ๐Ÿค– Part 2: RAG Backend Class\n","\n","Create a simple RAG backend that can initialize the database and answer queries.\n"]},{"cell_type":"code","execution_count":3,"metadata":{"id":"fmL7j1nHwqJR","colab":{"base_uri":"https://localhost:8080/","height":387,"referenced_widgets":["a973986393074781b9ab87caba9ee383","c9191ff9750a4e39b05ea61ddb795a86","f6a80f84c6134fa788c063f006b48164","9ed4cbfa352d4f02903708f97f3629a9","5caed79662044187be483e39cca1b982","5a79b77c48ad431dbf750dfd561634f7","51b53b9a7d3043aa97d5cddd2ec527fd","582a7ae98cce4f6c9e300d12724f056a","34a4adcbd3c246d3adbce056999f8c44","97ca03b071dc4291a21063474ff7d55b","14863d957d5e49558f52df9aa02c26ba","0462c961a8ab4084b88f109a0119acf9","ef0521ed62784b2eb538e6d87f877c53","b28323d907f9488d96c1d0631ae9bcba","57fdd654ce984842b670abbb42a04127","17b8e3cab85b4e11ae65e942ea98df52","01305f7fbafc41b4bed560c37a743d11","3c27b03ecb70422691a90be77f6ea1f9","ed7cbfa1cf694c2ca051f69e3620573b","cf5085db9dfc4f8aa2e7b2b7793ec7b6","6e649397fe7e49f581dc5d7ef8b445ce","71ea987580fc446196f039797a1676d9","7548f56a62824efb91f205276b99e7ab","761a0c7aa477464d8209b68c6b69dff0","04742c31b16045fca7ef5a65b5ac878f","6c290b5fdbfb498fa8b7422931b05c33","c6bd86321e3d4653a90d78903c068b92","f6bd05b4330948178dba2e440ea95441","7253f5792e1f4b538aa8aa6540cf80a8","2ebfe0a57d11494dbb456e311a3aed8d","74ddf070acd847b6a3d49b57a586c7f5","c6d0dc7b095c4cd1a6a29819460b0101","58c29e5702e944ae86d4f947dde653df","6206c59ea1db426995cb0ed8288917ff","d15c517b4ab4499cbcccaa2cff0aa2e9","3526762b8e0c44f2a6a18767536fd725","a759634aa42446a6b1ad4b6e49fac83c","5d6164f77be54702a827a89dc3e5248c","77c2a6bebef64919a979530abc209cc4","7600f91f92274ea088909466
9895bba0","0e6da0ef3c5d4b8fa07c221cdb76ebb4","7cc5d833162b4d2b9e4e10d64a8bdff1","fde7415fefc54e6a94feacb68e8e46b8","1a017864a1e844a989cdbdf785c6deaa","2c4975e93a894bc09970d691c5fbd308","7f4769cadb5b4ce28044c1752be4240f","8f2b6121e080476aa755fe01751b3f03","8dc23a7066be4b139d25c10808866df7","030f67c44d5a461cb5071413df664834","d9c6e3b5832944b1a65d8fe13cc285ba","d2626f3567d844d5a266f392e180e85e","779c83badb6a412cb7846e9333a5020d","002814d59a6146ed9302bf3bfb40402a","a5251a34d0194c419928f538e13330b4","cb653bb5ec8e482e9ca9a6ec3e2e0bbd","8e7205eb26e24a08b07575caedcac6f8","0b7981c256e54898898032bbaf5ae254","51b295a2094240bd8dd18c7a0e5946ea","5ae9b8349ac0482f9a229caf349ee536","60a05b4b17ab49b0a70ab484d8d4fa22","6908777b26ef412189e2f23b77d7257f","07111eb10ccf4d42843b17d9838acd3f","e98729b748ce48ecb13eacb927b359ad","4dcad302c3ef4dc68342f9f62641b49d","a5d2fddf2c1048578316afc1472ecbaf","4e0068aca2d948269baad41bef12553f","7bce2d5db17047ebaef5a9262ee02123","35ad866cd32d4b81bbba8cdef8daeece","439b9e0c5e6e4403ba573c3b21a4c2ad","4f650f97ab674a61ab5ab039c41f9d6c","bd0ee9e36fe1499387a18f08542de047","69b8a46a04964632ab36dbff9c839ec7","d2887460344843ba97e538f19f86aaff","a46cd13fef074bbf829292d31f054dcf","5542e9cef0de47a282140985fa5e7c42","2461975590c2470ca56815ac8776f785","040eb0cf0f96419d8260b29b65e06705","340fe32a86de4a1b8d6dbd5d77bae32e","ad5174fcf4c740689665337236c5be8a","f0a7fa8e7c9a4a63b968f8390e70cc02","8c1e193a05c44c37a0e46de8d44c65b6","573a3932932d407dbc069e7ff2d746f3","9a83c384ab144b4c97c31bc222c39f82","48daa21f1e9b44b5818ce648a36f5ba7","e09129fe146b4181883d46789b43d139","e7c97957c95a45018c27908595eb4225","1fe1fe6f95794f5f96ad8f306339dd72","f43cf494e30e4adca5b5c781fdb3a2e9","7657eaad04604636bdbe8ecd8d72726b","2538b9bde7d34319bbaeb709f4ac1f9d","7c50b1b5814e4762b6051b84d4bff8e7","49ccbc26cf164aaf9da5a046dda184e8","ef4b524c0fd54f208eeaf1e700021878","408d931c780e46f89368c9045a8bace0","9a6c0a30e11e4c1684f7bdc1f727957f","b34eabd1790b48b2ad1e00d80514d290","6762d1fc7f7f4b57be4ec176d4f9c
29b","c4512ebcb4bb4d7094c643cf07e89d03","29bebdc1f8504d14af28d724366ad7b2","f3b33290d5ee482b8cfa2d3d714c0fc6","495b6fa6204546b1be61603d17ae07d8","807ff520a6394fecb59b43421b965ad9","e61ba4716c0141909389f68f52d95a5e","f893be25aab04b6fb28b85816f2a9967","d7feae08b8d34a128f204d3629232de4","9bfa8cb03b0549ab8d165e868a1b2be4","a34eac465f204ee195381db32ec32982","06b22ef1c771413f84a7e58aee89f945","0aec93f287c5489c8c21df48c91dab41","49d45c8c131e4a12b5711d7bdbc7916f","f046a9b1567a41f4b4fc96642f19d239","36b18bff4a884aa5bd8aeb4099630b27","0db87ddc58ce43128b9835b9b4227e5c","36f26b19ab3644999fc84821e9811297","3a1c6eb0cab14c419f0bdb44b980a0d0","59aabff07295424583ae485bdeb79ab9","b4549f5395cd431cb6010ba85e1048bd","b6318d80fde94bd891cf17bbf6ece41a","65da1e3aebb94590bda1dfd77e43f290","55d292383285444e877b94cd78236621","6827cda7c25c47aa81c4a0b398200c9c"]},"executionInfo":{"status":"ok","timestamp":1762074066989,"user_tz":-330,"elapsed":9703,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"6b9e8759-f50a-4a12-c8b1-c98776d9b9c1"},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:00"],"text/html":["
"]},"metadata":{}},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":5}],"source":["print(\"๐ŸŽ‰ Launching your Basic RAG Assistant...\")\n","print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n","print(\"\")\n","print(\"๐Ÿ“‹ Testing Instructions:\")\n","print(\"1. Click 'Initialize Database' button first\")\n","print(\"2. Wait for success message\")\n","print(\"3. Enter a question in the query box\")\n","print(\"4. Click 'Ask Question' to get AI response\")\n","print(\"\")\n","print(\"๐Ÿ’ก Example questions to try:\")\n","print(\"- What are the main topics in the documents?\")\n","print(\"- Summarize the key findings\")\n","print(\"- Explain the methodology used\")\n","print(\"\")\n","print(\"๐Ÿš€ Launch your app:\")\n","\n","# Your launch code here:\n","# Uncomment when implemented\n","basic_interface.launch()"]},{"cell_type":"markdown","metadata":{"id":"O4AiEyz-wqJW"},"source":["## โœ… Assignment Completion Checklist\n","\n","Before submitting, ensure you have:\n","\n","- [x] RAG backend is provided and working\n","- [ ] Created Gradio interface with required components:\n"," - [ ] Title and description using gr.Markdown()\n"," - [ ] Initialize database button using gr.Button()\n"," - [ ] Status output using gr.Textbox()\n"," - [ ] Query input field using gr.Textbox()\n"," - [ ] Submit query button using gr.Button()\n"," - [ ] Response output area using gr.Textbox()\n","- [ ] Connected buttons to backend functions using .click()\n","- [ ] Successfully launched the application\n","- [ ] Tested the full workflow (initialize โ†’ query โ†’ response)\n","\n","## ๐ŸŽŠ Congratulations!\n","\n","You've successfully built your first Gradio RAG application! 
You now have:\n","\n","- A functional web interface for your RAG system\n","- Understanding of Gradio basics and component connections\n","- A foundation for building more complex AI applications\n","\n","**Next Steps**: Complete Assignment 3b to add advanced configuration options to your RAG interface!\n"]}],"metadata":{"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"colab":{"provenance":[{"file_id":"https://github.com/ashisa4e/ai-accelerator-C2/blob/main/Day_6/session_2/assignments/assignment_3a_basic_gradio_rag.ipynb","timestamp":1762073521024}],"gpuType":"T4"},"accelerator":"GPU","widgets":{"application/vnd.jupyter.widget-state+json":{"a973986393074781b9ab87caba9ee383":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_c9191ff9750a4e39b05ea61ddb795a86","IPY_MODEL_f6a80f84c6134fa788c063f006b48164","IPY_MODEL_9ed4cbfa352d4f02903708f97f3629a9"],"layout":"IPY_MODEL_5caed79662044187be483e39cca1b982"}},"c9191ff9750a4e39b05ea61ddb795a86":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5a79b77c48ad431dbf750dfd561634f7","placeholder":"โ€‹","style":"IPY_MODEL_51b53b9a7d304
3aa97d5cddd2ec527fd","value":"modules.json:โ€‡100%"}},"f6a80f84c6134fa788c063f006b48164":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_582a7ae98cce4f6c9e300d12724f056a","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_34a4adcbd3c246d3adbce056999f8c44","value":349}},"9ed4cbfa352d4f02903708f97f3629a9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_97ca03b071dc4291a21063474ff7d55b","placeholder":"โ€‹","style":"IPY_MODEL_14863d957d5e49558f52df9aa02c26ba","value":"โ€‡349/349โ€‡[00:00<00:00,โ€‡36.3kB/s]"}},"5caed79662044187be483e39cca1b982":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"h
eight":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5a79b77c48ad431dbf750dfd561634f7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"51b53b9a7d3043aa97d5cddd2ec527fd":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"582a7ae98cce4f6c9e300d12724f056a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_n
ame":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"34a4adcbd3c246d3adbce056999f8c44":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"97ca03b071dc4291a21063474ff7d55b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"
left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"14863d957d5e49558f52df9aa02c26ba":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0462c961a8ab4084b88f109a0119acf9":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_ef0521ed62784b2eb538e6d87f877c53","IPY_MODEL_b28323d907f9488d96c1d0631ae9bcba","IPY_MODEL_57fdd654ce984842b670abbb42a04127"],"layout":"IPY_MODEL_17b8e3cab85b4e11ae65e942ea98df52"}},"ef0521ed62784b2eb538e6d87f877c53":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_01305f7fbafc41b4bed560c37a743d11","placeholder":"โ€‹","style":"IPY_MODEL_3c27b03ecb70422691a90be77f6ea1f9","value":"config_sentence_transformers.json:โ€‡100%"}},"b28323d907f9488d96c1d0631ae9bcba":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel"
,"model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ed7cbfa1cf694c2ca051f69e3620573b","max":124,"min":0,"orientation":"horizontal","style":"IPY_MODEL_cf5085db9dfc4f8aa2e7b2b7793ec7b6","value":124}},"57fdd654ce984842b670abbb42a04127":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6e649397fe7e49f581dc5d7ef8b445ce","placeholder":"โ€‹","style":"IPY_MODEL_71ea987580fc446196f039797a1676d9","value":"โ€‡124/124โ€‡[00:00<00:00,โ€‡15.1kB/s]"}},"17b8e3cab85b4e11ae65e942ea98df52":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":n
ull,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"01305f7fbafc41b4bed560c37a743d11":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3c27b03ecb70422691a90be77f6ea1f9":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ed7cbfa1cf694c2ca051f69e3620573b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_item
s":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cf5085db9dfc4f8aa2e7b2b7793ec7b6":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"6e649397fe7e49f581dc5d7ef8b445ce":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"
overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"71ea987580fc446196f039797a1676d9":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"7548f56a62824efb91f205276b99e7ab":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_761a0c7aa477464d8209b68c6b69dff0","IPY_MODEL_04742c31b16045fca7ef5a65b5ac878f","IPY_MODEL_6c290b5fdbfb498fa8b7422931b05c33"],"layout":"IPY_MODEL_c6bd86321e3d4653a90d78903c068b92"}},"761a0c7aa477464d8209b68c6b69dff0":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f6bd05b4330948178dba2e440ea95441","placeholder":"โ€‹","style":"IPY_MODEL_7253f5792e1f4b538aa8aa6540cf80a8","value":"README.md:โ€‡"}},"04742c31b16045fca7ef5a65b5ac878f":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_vie
w_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_2ebfe0a57d11494dbb456e311a3aed8d","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_74ddf070acd847b6a3d49b57a586c7f5","value":1}},"6c290b5fdbfb498fa8b7422931b05c33":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_c6d0dc7b095c4cd1a6a29819460b0101","placeholder":"โ€‹","style":"IPY_MODEL_58c29e5702e944ae86d4f947dde653df","value":"โ€‡94.8k/?โ€‡[00:00<00:00,โ€‡8.44MB/s]"}},"c6bd86321e3d4653a90d78903c068b92":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f6bd05b4330948178dba2e440ea95441"
:{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7253f5792e1f4b538aa8aa6540cf80a8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2ebfe0a57d11494dbb456e311a3aed8d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":
null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"74ddf070acd847b6a3d49b57a586c7f5":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"c6d0dc7b095c4cd1a6a29819460b0101":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"58c29e5702e944ae86d4f947dde653df":{"model_module":"@jupyter-widgets/controls","model_nam
e":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6206c59ea1db426995cb0ed8288917ff":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_d15c517b4ab4499cbcccaa2cff0aa2e9","IPY_MODEL_3526762b8e0c44f2a6a18767536fd725","IPY_MODEL_a759634aa42446a6b1ad4b6e49fac83c"],"layout":"IPY_MODEL_5d6164f77be54702a827a89dc3e5248c"}},"d15c517b4ab4499cbcccaa2cff0aa2e9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_77c2a6bebef64919a979530abc209cc4","placeholder":"โ€‹","style":"IPY_MODEL_7600f91f92274ea0889094669895bba0","value":"sentence_bert_config.json:โ€‡100%"}},"3526762b8e0c44f2a6a18767536fd725":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY
_MODEL_0e6da0ef3c5d4b8fa07c221cdb76ebb4","max":52,"min":0,"orientation":"horizontal","style":"IPY_MODEL_7cc5d833162b4d2b9e4e10d64a8bdff1","value":52}},"a759634aa42446a6b1ad4b6e49fac83c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_fde7415fefc54e6a94feacb68e8e46b8","placeholder":"โ€‹","style":"IPY_MODEL_1a017864a1e844a989cdbdf785c6deaa","value":"โ€‡52.0/52.0โ€‡[00:00<00:00,โ€‡5.79kB/s]"}},"5d6164f77be54702a827a89dc3e5248c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"77c2a6bebef64919a979530abc209cc4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":
"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7600f91f92274ea0889094669895bba0":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0e6da0ef3c5d4b8fa07c221cdb76ebb4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":
null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7cc5d833162b4d2b9e4e10d64a8bdff1":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"fde7415fefc54e6a94feacb68e8e46b8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1a017864a1e844a989cdbdf785c6deaa":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyle
Model","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2c4975e93a894bc09970d691c5fbd308":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_7f4769cadb5b4ce28044c1752be4240f","IPY_MODEL_8f2b6121e080476aa755fe01751b3f03","IPY_MODEL_8dc23a7066be4b139d25c10808866df7"],"layout":"IPY_MODEL_030f67c44d5a461cb5071413df664834"}},"7f4769cadb5b4ce28044c1752be4240f":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_d9c6e3b5832944b1a65d8fe13cc285ba","placeholder":"โ€‹","style":"IPY_MODEL_d2626f3567d844d5a266f392e180e85e","value":"config.json:โ€‡100%"}},"8f2b6121e080476aa755fe01751b3f03":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_779c83badb6a412cb7846e9333a5020d","max":743,"min":0,"orientation":"horizontal","style":"IPY_MODEL_002814d59a6146ed9302bf3bfb40402a","value":743}},"8dc23a7066be4b139d25c10808866df7":
{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a5251a34d0194c419928f538e13330b4","placeholder":"โ€‹","style":"IPY_MODEL_cb653bb5ec8e482e9ca9a6ec3e2e0bbd","value":"โ€‡743/743โ€‡[00:00<00:00,โ€‡99.0kB/s]"}},"030f67c44d5a461cb5071413df664834":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d9c6e3b5832944b1a65d8fe13cc285ba":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":nu
ll,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d2626f3567d844d5a266f392e180e85e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"779c83badb6a412cb7846e9333a5020d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null
,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"002814d59a6146ed9302bf3bfb40402a":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a5251a34d0194c419928f538e13330b4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cb653bb5ec8e482e9ca9a6ec3e2e0bbd":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"8e7205eb26e24a08b07575caedcac6f8":{"model
_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_0b7981c256e54898898032bbaf5ae254","IPY_MODEL_51b295a2094240bd8dd18c7a0e5946ea","IPY_MODEL_5ae9b8349ac0482f9a229caf349ee536"],"layout":"IPY_MODEL_60a05b4b17ab49b0a70ab484d8d4fa22"}},"0b7981c256e54898898032bbaf5ae254":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6908777b26ef412189e2f23b77d7257f","placeholder":"โ€‹","style":"IPY_MODEL_07111eb10ccf4d42843b17d9838acd3f","value":"model.safetensors:โ€‡100%"}},"51b295a2094240bd8dd18c7a0e5946ea":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_e98729b748ce48ecb13eacb927b359ad","max":133466304,"min":0,"orientation":"horizontal","style":"IPY_MODEL_4dcad302c3ef4dc68342f9f62641b49d","value":133466304}},"5ae9b8349ac0482f9a229caf349ee536":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls"
,"_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a5d2fddf2c1048578316afc1472ecbaf","placeholder":"โ€‹","style":"IPY_MODEL_4e0068aca2d948269baad41bef12553f","value":"โ€‡133M/133Mโ€‡[00:01<00:00,โ€‡126MB/s]"}},"60a05b4b17ab49b0a70ab484d8d4fa22":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6908777b26ef412189e2f23b77d7257f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_r
ows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"07111eb10ccf4d42843b17d9838acd3f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e98729b748ce48ecb13eacb927b359ad":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4dcad302c3ef4dc68342f9f62641b49d":{"model_module":"@jupyter-widgets/controls"
,"model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a5d2fddf2c1048578316afc1472ecbaf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4e0068aca2d948269baad41bef12553f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"7bce2d5db17047ebaef5a9262ee02123":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_mod
el_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_35ad866cd32d4b81bbba8cdef8daeece","IPY_MODEL_439b9e0c5e6e4403ba573c3b21a4c2ad","IPY_MODEL_4f650f97ab674a61ab5ab039c41f9d6c"],"layout":"IPY_MODEL_bd0ee9e36fe1499387a18f08542de047"}},"35ad866cd32d4b81bbba8cdef8daeece":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_69b8a46a04964632ab36dbff9c839ec7","placeholder":"โ€‹","style":"IPY_MODEL_d2887460344843ba97e538f19f86aaff","value":"tokenizer_config.json:โ€‡100%"}},"439b9e0c5e6e4403ba573c3b21a4c2ad":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_a46cd13fef074bbf829292d31f054dcf","max":366,"min":0,"orientation":"horizontal","style":"IPY_MODEL_5542e9cef0de47a282140985fa5e7c42","value":366}},"4f650f97ab674a61ab5ab039c41f9d6c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","
description":"","description_tooltip":null,"layout":"IPY_MODEL_2461975590c2470ca56815ac8776f785","placeholder":"โ€‹","style":"IPY_MODEL_040eb0cf0f96419d8260b29b65e06705","value":"โ€‡366/366โ€‡[00:00<00:00,โ€‡42.7kB/s]"}},"bd0ee9e36fe1499387a18f08542de047":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"69b8a46a04964632ab36dbff9c839ec7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":nul
l,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d2887460344843ba97e538f19f86aaff":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a46cd13fef074bbf829292d31f054dcf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5542e9cef0de47a282140985fa5e7c42":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyl
eModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2461975590c2470ca56815ac8776f785":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"040eb0cf0f96419d8260b29b65e06705":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"340fe32a86de4a1b8d6dbd5d77bae32e":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_sty
le":"","children":["IPY_MODEL_ad5174fcf4c740689665337236c5be8a","IPY_MODEL_f0a7fa8e7c9a4a63b968f8390e70cc02","IPY_MODEL_8c1e193a05c44c37a0e46de8d44c65b6"],"layout":"IPY_MODEL_573a3932932d407dbc069e7ff2d746f3"}},"ad5174fcf4c740689665337236c5be8a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_9a83c384ab144b4c97c31bc222c39f82","placeholder":"โ€‹","style":"IPY_MODEL_48daa21f1e9b44b5818ce648a36f5ba7","value":"vocab.txt:โ€‡"}},"f0a7fa8e7c9a4a63b968f8390e70cc02":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_e09129fe146b4181883d46789b43d139","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_e7c97957c95a45018c27908595eb4225","value":1}},"8c1e193a05c44c37a0e46de8d44c65b6":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1fe1fe6f95794f5f96ad8f306339dd72","placeholder":"โ€‹","style":"IPY_MODEL_f43cf494e30e4adca5b5c781fdb3a2e9","value":"โ€‡232k/?โ€‡[00:00<
00:00,โ€‡11.4MB/s]"}},"573a3932932d407dbc069e7ff2d746f3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9a83c384ab144b4c97c31bc222c39f82":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_
x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"48daa21f1e9b44b5818ce648a36f5ba7":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e09129fe146b4181883d46789b43d139":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"e7c97957c95a45018c27908595eb4225":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"1fe1fe6f95794f5f96ad8f306339dd72
":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f43cf494e30e4adca5b5c781fdb3a2e9":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"7657eaad04604636bdbe8ecd8d72726b":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_2538b9bde7d34319bbaeb709f4ac1f9d","IPY_MODEL_7c50b1b5814e4762b6051b84d4bff8e7","IPY_MODEL_49ccbc26cf164aaf9da5a046dda184e8"],"layout":"IPY_MODEL_ef4b524c0fd54f208eeaf1
e700021878"}},"2538b9bde7d34319bbaeb709f4ac1f9d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_408d931c780e46f89368c9045a8bace0","placeholder":"โ€‹","style":"IPY_MODEL_9a6c0a30e11e4c1684f7bdc1f727957f","value":"tokenizer.json:โ€‡"}},"7c50b1b5814e4762b6051b84d4bff8e7":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_b34eabd1790b48b2ad1e00d80514d290","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_6762d1fc7f7f4b57be4ec176d4f9c29b","value":1}},"49ccbc26cf164aaf9da5a046dda184e8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_c4512ebcb4bb4d7094c643cf07e89d03","placeholder":"โ€‹","style":"IPY_MODEL_29bebdc1f8504d14af28d724366ad7b2","value":"โ€‡711k/?โ€‡[00:00<00:00,โ€‡45.4MB/s]"}},"ef4b524c0fd54f208eeaf1e700021878":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-wi
dgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"408d931c780e46f89368c9045a8bace0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9a6c0a30e11e4c1684f7bdc1f727957f":{"model_module":"@jupyter-widgets/controls","model_name":"D
escriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b34eabd1790b48b2ad1e00d80514d290":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"6762d1fc7f7f4b57be4ec176d4f9c29b":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"c4512ebcb4bb4d7094c643cf07e89d03":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_nam
e":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"29bebdc1f8504d14af28d724366ad7b2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f3b33290d5ee482b8cfa2d3d714c0fc6":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_495b6fa6204546b1be61603d17ae07d8","IPY_MODEL_807ff520a6394fecb59b43421b965ad9","IPY_MODEL_e61ba4716c0141909389f68f52d95a5e"],"layout":"IPY_MODEL_f893be25aab04b6fb28b85816f2a9967"}},"495b6fa6204546b1be61603d17ae07d8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":
"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_d7feae08b8d34a128f204d3629232de4","placeholder":"โ€‹","style":"IPY_MODEL_9bfa8cb03b0549ab8d165e868a1b2be4","value":"special_tokens_map.json:โ€‡100%"}},"807ff520a6394fecb59b43421b965ad9":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_a34eac465f204ee195381db32ec32982","max":125,"min":0,"orientation":"horizontal","style":"IPY_MODEL_06b22ef1c771413f84a7e58aee89f945","value":125}},"e61ba4716c0141909389f68f52d95a5e":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0aec93f287c5489c8c21df48c91dab41","placeholder":"โ€‹","style":"IPY_MODEL_49d45c8c131e4a12b5711d7bdbc7916f","value":"โ€‡125/125โ€‡[00:00<00:00,โ€‡16.1kB/s]"}},"f893be25aab04b6fb28b85816f2a9967":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":
"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d7feae08b8d34a128f204d3629232de4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9bfa8cb03b0549ab8d165e868a1b2be4":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel
","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a34eac465f204ee195381db32ec32982":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"06b22ef1c771413f84a7e58aee89f945":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"0aec93f287c5489c8c21df48c91dab41":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"ali
gn_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"49d45c8c131e4a12b5711d7bdbc7916f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f046a9b1567a41f4b4fc96642f19d239":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_36b18bff4a884aa5bd8aeb4099630b27","IPY_MODEL_0db87ddc58ce43128b9835b9b4227e5c","IPY_MODEL_36f26b19ab3644999fc84821e9811297"],"layout":"IPY_MODEL_3a1c6eb0cab14c419f0bdb44b980a0d0"}},"36b18bff4a884aa5bd8aeb4099630b27":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.
0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_59aabff07295424583ae485bdeb79ab9","placeholder":"โ€‹","style":"IPY_MODEL_b4549f5395cd431cb6010ba85e1048bd","value":"config.json:โ€‡100%"}},"0db87ddc58ce43128b9835b9b4227e5c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_b6318d80fde94bd891cf17bbf6ece41a","max":190,"min":0,"orientation":"horizontal","style":"IPY_MODEL_65da1e3aebb94590bda1dfd77e43f290","value":190}},"36f26b19ab3644999fc84821e9811297":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_55d292383285444e877b94cd78236621","placeholder":"โ€‹","style":"IPY_MODEL_6827cda7c25c47aa81c4a0b398200c9c","value":"โ€‡190/190โ€‡[00:00<00:00,โ€‡24.5kB/s]"}},"3a1c6eb0cab14c419f0bdb44b980a0d0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"gr
id_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"59aabff07295424583ae485bdeb79ab9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b4549f5395cd431cb6010ba85e1048bd":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b6318d80fde94bd891cf17bbf6ece41a":{"model_modu
le":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"65da1e3aebb94590bda1dfd77e43f290":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"55d292383285444e877b94cd78236621":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":nu
ll,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6827cda7c25c47aa81c4a0b398200c9c":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Ashish_Sahu/Day_06/Day_0602_assignment_3b_advanced_gradio_rag.ipynb b/Ashish_Sahu/Day_06/Day_0602_assignment_3b_advanced_gradio_rag.ipynb deleted file mode 100644 index a680427..0000000 --- a/Ashish_Sahu/Day_06/Day_0602_assignment_3b_advanced_gradio_rag.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"yD43WL2Q1LFe"},"source":["# Assignment 3b: Advanced Gradio RAG Frontend\n","## Day 6 Session 2 - Building Configurable RAG Applications\n","\n","In this assignment, you'll extend your basic RAG interface with advanced configuration options to create a professional, feature-rich RAG application.\n","\n","**New Features to Add:**\n","- Model selection dropdown (gpt-4o, gpt-4o-mini)\n","- Temperature slider (0 to 1 with 0.1 intervals)\n","- Chunk size configuration\n","- Chunk overlap configuration \n","- Similarity top-k slider\n","- Node postprocessor multiselect\n","- Similarity cutoff slider\n","- Response synthesizer multiselect\n","\n","**Learning Objectives:**\n","- Advanced Gradio components and 
interactions\n","- Dynamic RAG configuration\n","- Professional UI design patterns\n","- Parameter validation and handling\n","- Building production-ready AI applications\n","\n","**Prerequisites:**\n","- Completed Assignment 3a (Basic Gradio RAG)\n","- Understanding of RAG parameters and their effects\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"dhxse7xA1Sut","executionInfo":{"status":"ok","timestamp":1762075520436,"user_tz":-330,"elapsed":22349,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"ce919051-cf07-4161-def6-9bdb14833218"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["!pip install -r '/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt'"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"aiZeXpz-14A4","executionInfo":{"status":"ok","timestamp":1762075561961,"user_tz":-330,"elapsed":38346,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"d7c14a6b-639e-4b7f-faa5-5d50cd58adb4"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 5)) (0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting 
llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m22.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in 
/usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.1.0)\n","Requirement already 
satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) 
(6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in 
/usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from 
llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) 
(0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) 
(4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) 
(2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from 
huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from 
llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from 
llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," 
Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages 
(from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt 
(line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab 
Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading 
llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/Colab Notebooks/Day06/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m37.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl (4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m141.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m98.2 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m136.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m32.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m5.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m22.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading 
tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m105.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m9.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m5.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m32.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m25.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m4.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m16.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=a8491b5e0bca8f176b580c701ed0136cd140607a1e2570c79b47e18de74180f8\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 
llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"edc8623e60054cbabc0892c4b20e8935"}},"metadata":{}}]},{"cell_type":"markdown","metadata":{"id":"dWJ42Ql-1LFh"},"source":["## ๐Ÿ“š Part 1: Setup and Imports\n","\n","Import all necessary libraries including advanced RAG components for configuration options.\n","\n","**Note:** This assignment uses OpenRouter for LLM access (not OpenAI). Make sure you have your `OPENROUTER_API_KEY` environment variable set.\n"]},{"cell_type":"code","execution_count":1,"metadata":{"id":"NkEkhVV91LFi","outputId":"7260ceb9-a36c-4dbe-bc0f-52924120dd3b","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1762075612092,"user_tz":-330,"elapsed":35791,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}}},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… All libraries imported successfully!\n"]}],"source":["# Import all required libraries\n","import gradio as gr\n","import os\n","from pathlib import Path\n","from typing import Dict, List, Optional, Any\n","\n","# LlamaIndex core components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","# Advanced RAG 
components\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n","from llama_index.core.retrievers import VectorIndexRetriever\n","\n","print(\"โœ… All libraries imported successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"vmwNykqv1LFk"},"source":["## ๐Ÿค– Part 2: Advanced RAG Backend Class\n","\n","Create an advanced RAG backend that supports dynamic configuration of all parameters.\n"]},{"cell_type":"code","source":["from google.colab import userdata\n","import os\n","\n","os.environ['OPENROUTER_API_KEY'] = userdata.get('OPENROUTER_API_KEY')"],"metadata":{"id":"4dXeRTbF2baB","executionInfo":{"status":"ok","timestamp":1762075672260,"user_tz":-330,"elapsed":682,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}}},"execution_count":2,"outputs":[]},{"cell_type":"code","execution_count":3,"metadata":{"id":"TXs_n_n91LFk","outputId":"d39227be-7c7c-4197-ad77-7919b52bf700","colab":{"base_uri":"https://localhost:8080/","height":387,"referenced_widgets":["f63d9b0bc8d84a7e8c3dde8092748efb","fff8a5e6f2584c11ae92669e90149b55","9c45c10affea4bd38fa42d018336a1ae","d6bbb69e7ac245059fc5b43367098035","5d44834a81b049faa99d15b0ddea1822","bf96d6969e5f47f488aa470a798ace3b","0fa82b947b7c4a9db115fa0aae66b699","5968af086e2f45f2b80eb98d9c7d6b07","e55ff8ac1ece44fe93396642a8f69cf5","70dcd0c31cc343ceac79ee5b9e279d16","30c784db2c7d40d197bba8bb7324e851","8faf85a59cb74247bc3d578f9ad72556","67a93fb500c6448e87e8cd513af1b84b","4ed87ce306114c69bcb2a8f3ef85d036","fe2896eff96b46b2b1189ef064917933","d97bd850ff314cc99dac0ac1ad538bcf","8829a5910f244946adf2acf7adbb0e52","284db2b5bac74176b9f379065a5b1b8b","2e69ddccea2544819b62883cb452a4ed","4f949c97f4c64f33bf542e5256a47801","8cd06e3730b4407aabb2ac08c1333c97","06f033ed179a455a97b70c76b94d2e5e","0f7d7381db954c048c248ca08413663d","0b9a45dd94c245a0ba57fffdd5727987","b5816c37f368443db8fb660e5c477049","047a76c191f24bac981
c0185c60212fb","f30466d966ea43a79c32811b4947b8a7","d37c57ea9e174b4b8c2543d32766d0dc","d42d2de19c944348939b82fa7a7a499f","575a711d6dde466282b3e28902a4d5aa","b096c1215b294f84a3a577492614fa8f","60b5b36707dc420280feffe69d8df9a6","146c38e89bd5461aa57f899e8b2d4f5e","3560cb2099bf483b82aa4b9788f85fe2","0ac1c922a0284af59cdaf31eef0593c8","1ab1773aa2954ec68989c11522b39d5a","f7001509bce4443fa8a23f2f6df098f4","e0516b54f7c34526bdda4570d41e2e33","a2ae6f2393af443e955509461fe5574c","6a65e3300966430da3087f53b7de43af","6217eb450c1d4c5086cd9a362cc07fb5","c435ed6cc9c142708ccfe7ba230f47d8","0607b07be7334929bbba5d7777853fb6","2c7898f5cd0d4bffa96981c66b057349","5cc55270cf5f4fb29f0ae6d5dfa7fd2d","3500338d17a94911b62f2040e3414abf","38532cc922f24e21880a136c390375f6","21539a7ae6e4444c84787537dd38b033","67ebb27f52ee450f9b57c1c7f7ee9d41","97e80744aa8a43f6ba7bde74b7678bc7","011e63116b8b4f28b7fa86a79e186906","e0a5378ea1f1448a972d29c8c7b6944c","4666ff409d9343f0b09ca1b6af423928","3e0a30dd64524579b25a7778930b5918","d995f28420e841dd8aa67faebcac3e8f","ac684e852984462b81d000b8730b2ed9","3d953152b1d34d9eab6f2473d8004c50","942410d2ac2b4fe49e889a00738e0fd9","67ff85788637456490392ba903c3e199","2f0ef2b89fa0472fa3c7cafef0de6b14","19eba7a723444c30ac8be9577d838e46","d75f9de26eeb4682a46d3ce413a22e9a","3f8c9aa883634411a70a0169392cbbf1","5474ba18dfd242b581c80fe3b6d15799","a7f819f13a794657b3c87bf765bcec6e","32f40e38d5e04d9e9a093061e120288a","5ee691f97b44433694fa9b3d92cf9b9d","5d519cb6b7b54072a36fc765c47cd4d8","5cdcb1e596b741ec817de51bdcb3a2c2","2a3d187866bd45e390b2649cf014d530","3e4e3d854fc04d5988224511bba1314f","b7b3765e34754470900d14d92b5981de","187bd0c0d9a54535bbda26a4cb1c554c","8cda645cb6894bb7b50e86f46bcb8550","cef23dae0dab4d9bad8d2389ef59bed4","cf4cc2efff4c47058932f1726dc91aa5","0110ce6585884566b2288cc38777fc08","daf78560c8f7432b8c3a17c903315e9e","417adb1b6b924af0a0a928e0ed85ac47","662b8533613941b88a9add3a92e86ff0","9dfaa0fa00aa4c1ea07a2df46d22db7c","e1e7d73adf3945228ee21e5616e84b3c","4b935025dbba4d9db62661bb
2a1967dd","7f62a70857c34e30ad404e99991d07e8","10913e7c9e274b329e84e8b7e3e6296a","09b91ef09bba405fb57babb479941496","699aa016e17a4ec4bedcb0f80d53894d","df772d0a1ccf44a6884d5667108fa78e","563da780ce984bd392843d988aa8556d","2d433fdd51334661bb1b0b322f68eade","c616936be96f469bbc4d0f6da8ea27c7","8d0c67aedc5d4155b7b5a41f7000d4af","801e6d4e2da94ee6a179964994005e0d","e766ff5d98e44979bc4716925c888377","1664d1ab377c411bbacb5b6a9066e688","f0046f18629f41f5819c87d95797f0c6","d4fcebe817e74bfdadce92f57ddc2bcc","ab83e2c2823545b5a55c4bf17849318b","3871bf8d590b49e294bd1d3d163370fe","4f478822c02145cfb8d2760c30c884aa","f2122cbd970945718c7ac59e9c35b3bc","9275ef9315cd4d01ba2f51f3e0382036","56b023c9af1a4ff48e5eaed399b328b1","dc6fd15fdf304f35832ef7d5d8c9873a","771b4272701046c4a903d2ba63c12b9c","34ad318ff32045e89111e9d28ec3c257","4e2bf7b0b639459999c015e143301c32","c47bd68998a34c02b5a0c0ae6fba1faf","ae43e3f64c264b3290d450eefd79ee99","fd4441c169e746f895c545249be968ac","870874be9a3e440f98ee0b13b462c0e7","3606e41741d94df8ada91ac7a7a375da","c028ad2ae17541feb74b3b995851e164","a2452ec0dcf54c41bf2bcec5ce260983","901f0526f262469cb8d9bbbd8682e7b6","237286670806486cb055335ffda9d4bc","bd79ec57b44f41d4a4d7870df461f038","99c4e470958045799ef41046249c4f04","f6f9f28b9e144850977722f470a8b2d9","5ddb850ca36f463c94cbff636ed2b91c","c5d7426c52b04b86a32a38984aee3f7b"]},"executionInfo":{"status":"ok","timestamp":1762075682320,"user_tz":-330,"elapsed":7208,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}}},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:00 Dict[str, Any]:\n"," \"\"\"Query the RAG system with advanced configuration.\"\"\"\n","\n"," # Check if index exists\n"," if self.index is None:\n"," return {\"response\": \"โŒ Please initialize the database first!\", \"sources\": [], \"config\": {}}\n","\n"," # Check if question is empty\n"," if not question or not question.strip():\n"," return {\"response\": \"โš ๏ธ Please enter a question 
first!\", \"sources\": [], \"config\": {}}\n","\n"," try:\n"," # Update settings with new parameters\n"," self.update_settings(model, temperature, chunk_size, chunk_overlap)\n","\n"," # Get postprocessors\n"," postprocessors = []\n"," for name in postprocessor_names:\n"," processor = self.get_postprocessor(name, similarity_cutoff)\n"," if processor is not None:\n"," postprocessors.append(processor)\n","\n"," # Get synthesizer\n"," synthesizer = self.get_synthesizer(synthesizer_name)\n","\n"," # Create query engine with all parameters\n"," query_engine_kwargs = {\"similarity_top_k\": similarity_top_k}\n"," if postprocessors:\n"," query_engine_kwargs[\"node_postprocessors\"] = postprocessors\n"," if synthesizer is not None:\n"," query_engine_kwargs[\"response_synthesizer\"] = synthesizer\n","\n"," query_engine = self.index.as_query_engine(**query_engine_kwargs)\n","\n"," # Query and get response\n"," response = query_engine.query(question)\n","\n"," # Extract source information if available\n"," sources = []\n"," if hasattr(response, 'source_nodes'):\n"," for node in response.source_nodes:\n"," sources.append({\n"," \"text\": node.text[:200] + \"...\",\n"," \"score\": getattr(node, 'score', 0.0),\n"," \"source\": getattr(node.node, 'metadata', {}).get('file_name', 'Unknown')\n"," })\n","\n"," return {\n"," \"response\": str(response),\n"," \"sources\": sources,\n"," \"config\": {\n"," \"model\": model,\n"," \"temperature\": temperature,\n"," \"chunk_size\": chunk_size,\n"," \"chunk_overlap\": chunk_overlap,\n"," \"similarity_top_k\": similarity_top_k,\n"," \"postprocessors\": postprocessor_names,\n"," \"similarity_cutoff\": similarity_cutoff,\n"," \"synthesizer\": synthesizer_name\n"," }\n"," }\n","\n"," except Exception as e:\n"," return {\"response\": f\"โŒ Error processing query: {str(e)}\", \"sources\": [], \"config\": {}}\n","\n","# Initialize the backend\n","rag_backend = AdvancedRAGBackend()\n","print(\"๐Ÿš€ Advanced RAG Backend initialized and 
ready!\")\n"]},{"cell_type":"markdown","metadata":{"id":"AGddruoS1LFm"},"source":["## ๐ŸŽจ Part 3: Advanced Gradio Interface\n","\n","Create a sophisticated Gradio interface with all the configuration options specified:\n","1. Database initialization button\n","2. Search query input and button \n","3. Model selection dropdown\n","4. Temperature slider\n","5. Chunk size and overlap inputs\n","6. Similarity top-k slider\n","7. Node postprocessor multiselect\n","8. Similarity cutoff slider\n","9. Response synthesizer multiselect\n"]},{"cell_type":"code","execution_count":4,"metadata":{"id":"rh2I0fbv1LFn","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1762075687808,"user_tz":-330,"elapsed":211,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"fa71258e-415f-4ceb-9a5e-55abed53470b"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Advanced RAG interface created successfully!\n"]}],"source":["def create_advanced_rag_interface():\n"," \"\"\"Create advanced RAG interface with full configuration options.\"\"\"\n","\n"," def initialize_db():\n"," \"\"\"Handle database initialization.\"\"\"\n"," return rag_backend.initialize_database()\n","\n"," def handle_advanced_query(question, model, temperature, chunk_size, chunk_overlap,\n"," similarity_top_k, postprocessors, similarity_cutoff, synthesizer):\n"," \"\"\"Handle advanced RAG queries with all configuration options.\"\"\"\n"," result = rag_backend.advanced_query(\n"," question, model, temperature, chunk_size, chunk_overlap,\n"," similarity_top_k, postprocessors, similarity_cutoff, synthesizer\n"," )\n","\n"," # Format configuration for display\n"," config_text = f\"\"\"**Current Configuration:**\n","- Model: {result['config'].get('model', 'N/A')}\n","- Temperature: {result['config'].get('temperature', 'N/A')}\n","- Chunk Size: {result['config'].get('chunk_size', 'N/A')}\n","- Chunk Overlap: {result['config'].get('chunk_overlap', 
'N/A')}\n","- Similarity Top-K: {result['config'].get('similarity_top_k', 'N/A')}\n","- Postprocessors: {', '.join(result['config'].get('postprocessors', []))}\n","- Similarity Cutoff: {result['config'].get('similarity_cutoff', 'N/A')}\n","- Synthesizer: {result['config'].get('synthesizer', 'N/A')}\"\"\"\n","\n"," return result[\"response\"], config_text\n","\n"," # TODO: Create the advanced interface structure\n"," # Hint: This interface needs more complex layout with configuration controls\n","\n"," with gr.Blocks(title=\"Advanced RAG Assistant\") as interface:\n"," # TODO: Add title and description\n"," # Hint: Use gr.Markdown() for formatted text\n","\n"," # Your title and description here:\n"," title = gr.Markdown(\"# Advanced RAG Assistant\")\n"," description = gr.Markdown(\"## This is an advanced RAG Assistant!\")\n","\n","\n"," # TODO: Add database initialization section\n"," # Hint: Use gr.Button() for initialization and gr.Textbox() for status\n"," init_btn = gr.Button(\"Initialize Vector Database\")\n"," status_output = gr.Textbox(label=\"Database Initialization Status\")\n","\n","\n"," # TODO: Create main layout with columns\n"," # Hint: Configuration controls on left, query/response on right makes sense\n"," # Use gr.Row() and gr.Column() to organize this\n","\n"," with gr.Row():\n"," with gr.Column(scale=1):\n","\n"," gr.Markdown(\"### โš™๏ธ RAG Configuration\")\n","\n"," # TODO: Model selection\n"," # Hint: Use gr.Dropdown() with choices=[\"gpt-4o\", \"gpt-4o-mini\"]\n"," model_dropdown = gr.Dropdown([\"gpt-4o\", \"gpt-4o-mini\"], label=\"Model\")\n","\n","\n"," # TODO: Temperature control\n"," # Hint: Use gr.Slider() with minimum=0.0, maximum=1.0, step=0.1, value=0.1\n"," temperature_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.1, label=\"Temperature\")\n","\n","\n"," # TODO: Chunking parameters\n"," # Hint: Use gr.Number() for numeric inputs with default values\n"," # chunk_size_input = ? 
(default 512)\n"," chunk_size_input = gr.Number(value=512, label=\"Chunk Size\")\n","\n"," # chunk_overlap_input = ? (default 50)\n"," chunk_overlap_input = gr.Number(value=50, label=\"Chunk Overlap\")\n","\n","\n"," # TODO: Retrieval parameters\n"," # Hint: Use gr.Slider() with minimum=1, maximum=20, step=1, value=5\n"," similarity_topk_slider = gr.Slider(minimum=1, maximum=20, step=1, value=5, label=\"Similarity Top-K\")\n","\n","\n"," # TODO: Postprocessor selection\n"," # Hint: Use gr.CheckboxGroup() with choices=[\"SimilarityPostprocessor\"]\n"," postprocessor_checkbox = gr.CheckboxGroup([\"SimilarityPostprocessor\"], label=\"Postprocessors\")\n","\n","\n"," # TODO: Similarity filtering\n"," # Hint: Use gr.Slider() with minimum=0.0, maximum=1.0, step=0.1, value=0.3\n"," similarity_cutoff_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.3, label=\"Similarity Cutoff\")\n","\n","\n"," # TODO: Response synthesizer\n"," # Hint: Use gr.Dropdown() with choices=[\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"]\n"," synthesizer_dropdown = gr.Dropdown([\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"], label=\"Synthesizer\")\n","\n","\n"," with gr.Column(scale=2):\n"," gr.Markdown(\"### ๐Ÿ’ฌ Query Interface\")\n","\n"," # TODO: Query input\n"," # Hint: Use gr.Textbox() with label=\"Ask a question\", placeholder text, lines=3\n"," query_input = gr.Textbox(label=\"Ask a question\", placeholder=\"Enter your question here...\", lines=3)\n","\n","\n"," # TODO: Submit button\n"," # Hint: Use gr.Button() with variant=\"primary\"\n"," submit_btn = gr.Button(\"Ask Question\", variant=\"primary\")\n","\n","\n"," # TODO: Response output\n"," # Hint: Use gr.Textbox() with lines=12, interactive=False\n"," response_output = gr.Textbox(lines=12, interactive=False, label=\"Response\")\n","\n","\n"," # TODO: Configuration display\n"," # Hint: Use gr.Textbox() with lines=8, interactive=False\n"," config_display = gr.Textbox(lines=8, 
interactive=False, label=\"Configuration\")\n","\n","\n"," # Uncomment to Connect functions to components\n"," init_btn.click(initialize_db, outputs=[status_output])\n","\n"," submit_btn.click(\n"," handle_advanced_query,\n"," inputs=[\n"," query_input, model_dropdown, temperature_slider,\n"," chunk_size_input, chunk_overlap_input, similarity_topk_slider,\n"," postprocessor_checkbox, similarity_cutoff_slider, synthesizer_dropdown\n"," ],\n"," outputs=[response_output, config_display]\n"," )\n","\n","\n"," return interface\n","\n","# Create the interface\n","advanced_interface = create_advanced_rag_interface()\n","print(\"โœ… Advanced RAG interface created successfully!\")"]},{"cell_type":"markdown","metadata":{"id":"a37UVCMR1LFn"},"source":["## ๐Ÿš€ Part 4: Launch Your Advanced Application\n","\n","Launch your advanced Gradio application and test all the configuration options!\n"]},{"cell_type":"code","execution_count":5,"metadata":{"id":"jfDJSLyI1LFo","colab":{"base_uri":"https://localhost:8080/","height":1000},"executionInfo":{"status":"ok","timestamp":1762075696082,"user_tz":-330,"elapsed":1359,"user":{"displayName":"Ashish Sahu","userId":"17845003615483429481"}},"outputId":"6fba2caa-d432-46cb-a2f8-909c5ea42f28"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐ŸŽ‰ Launching your Advanced RAG Assistant...\n","๐Ÿ”— Your application will open in a new browser tab!\n","\n","โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\n","\n","๐Ÿ“‹ Testing Instructions:\n","1. Click 'Initialize Vector Database' button first\n","2. Wait for success message\n","3. Configure your RAG parameters:\n"," - Choose model (gpt-4o, gpt-4o-mini)\n"," - Adjust temperature (0.0 = deterministic, 1.0 = creative)\n"," - Set chunk size and overlap\n"," - Choose similarity top-k\n"," - Select postprocessors and synthesizer\n","4. Enter a question and click 'Ask Question'\n","5. 
Review both the response and configuration used\n","\n","๐Ÿงช Experiments to try:\n","- Compare different models with the same question\n","- Test temperature effects (0.1 vs 0.9)\n","- Try different chunk sizes (256 vs 1024)\n","- Compare synthesizers (TreeSummarize vs Refine)\n","- Adjust similarity cutoff to filter results\n","It looks like you are running Gradio on a hosted Jupyter notebook, which requires `share=True`. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n","\n","Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n","* Running on public URL: https://3938e2fd2c7493020d.gradio.live\n","\n","This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"]},{"output_type":"display_data","data":{"text/plain":[""],"text/html":["
"]},"metadata":{}},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":5}],"source":["print(\"๐ŸŽ‰ Launching your Advanced RAG Assistant...\")\n","print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n","print(\"\")\n","print(\"โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\")\n","print(\"\")\n","print(\"๐Ÿ“‹ Testing Instructions:\")\n","print(\"1. Click 'Initialize Vector Database' button first\")\n","print(\"2. Wait for success message\")\n","print(\"3. Configure your RAG parameters:\")\n","print(\" - Choose model (gpt-4o, gpt-4o-mini)\")\n","print(\" - Adjust temperature (0.0 = deterministic, 1.0 = creative)\")\n","print(\" - Set chunk size and overlap\")\n","print(\" - Choose similarity top-k\")\n","print(\" - Select postprocessors and synthesizer\")\n","print(\"4. Enter a question and click 'Ask Question'\")\n","print(\"5. Review both the response and configuration used\")\n","print(\"\")\n","print(\"๐Ÿงช Experiments to try:\")\n","print(\"- Compare different models with the same question\")\n","print(\"- Test temperature effects (0.1 vs 0.9)\")\n","print(\"- Try different chunk sizes (256 vs 1024)\")\n","print(\"- Compare synthesizers (TreeSummarize vs Refine)\")\n","print(\"- Adjust similarity cutoff to filter results\")\n","\n","# Your code here:\n","advanced_interface.launch()"]},{"cell_type":"markdown","metadata":{"id":"QI1P5G7L1LFo"},"source":["## ๐Ÿ’ก Understanding the Configuration Options\n","\n","### Model Selection\n","- **gpt-4o**: Latest and most capable model, best quality responses\n","- **gpt-4o-mini**: Faster and cheaper while maintaining good quality\n","\n","### Temperature (0.0 - 1.0)\n","- **0.0-0.3**: Deterministic, factual responses\n","- **0.4-0.7**: Balanced creativity and accuracy\n","- **0.8-1.0**: More creative and varied responses\n","\n","### Chunk Size & Overlap\n","- **Chunk Size**: How much text to process at once (256-1024 typical)\n","- **Chunk Overlap**: 
Overlap between chunks to maintain context (10-100 typical)\n","\n","### Similarity Top-K (1-20)\n","- **Lower values (3-5)**: More focused, faster responses\n","- **Higher values (8-15)**: More comprehensive, detailed responses\n","\n","### Node Postprocessors\n","- **SimilarityPostprocessor**: Filters out low-relevance documents\n","\n","### Similarity Cutoff (0.0-1.0)\n","- **0.1-0.3**: More permissive, includes potentially relevant docs\n","- **0.5-0.8**: More strict, only highly relevant docs\n","\n","### Response Synthesizers\n","- **TreeSummarize**: Hierarchical summarization, good for complex topics\n","- **Refine**: Iterative refinement, builds detailed responses\n","- **CompactAndRefine**: Efficient version of Refine\n","- **Default**: Standard synthesis approach\n"]},{"cell_type":"markdown","metadata":{"id":"JClPY3ed1LFp"},"source":["## โœ… Assignment Completion Checklist\n","\n","Before submitting, ensure you have:\n","\n","- [ ] Set up your OPENROUTER_API_KEY environment variable\n","- [ ] Imported all necessary libraries including advanced RAG components\n","- [ ] Created AdvancedRAGBackend class with configurable parameters\n","- [ ] Implemented all required methods:\n"," - [ ] `update_settings()` - Updates LLM and chunking parameters\n"," - [ ] `initialize_database()` - Sets up vector database\n"," - [ ] `get_postprocessor()` - Returns selected postprocessor\n"," - [ ] `get_synthesizer()` - Returns selected synthesizer\n"," - [ ] `advanced_query()` - Handles queries with all configuration options\n","- [ ] Created advanced Gradio interface with all required components:\n"," - [ ] Initialize database button\n"," - [ ] Model selection dropdown (gpt-4o, gpt-4o-mini)\n"," - [ ] Temperature slider (0 to 1, step 0.1)\n"," - [ ] Chunk size input (default 512)\n"," - [ ] Chunk overlap input (default 50)\n"," - [ ] Similarity top-k slider (1 to 20, default 5)\n"," - [ ] Node postprocessor multiselect\n"," - [ ] Similarity cutoff slider (0.0 to 1.0, step 0.1, 
default 0.3)\n"," - [ ] Response synthesizer dropdown\n"," - [ ] Query input and submit button\n"," - [ ] Response output\n"," - [ ] Configuration display\n","- [ ] Connected all components to backend functions\n","- [ ] Successfully launched the application\n","- [ ] Tested different parameter combinations\n","- [ ] Verified all configuration options work correctly\n","\n","## ๐ŸŽŠ Congratulations!\n","\n","You've successfully built a professional, production-ready RAG application! You now have:\n","\n","- **Advanced Parameter Control**: Full control over all RAG system parameters\n","- **Professional UI**: Clean, organized interface with proper layout\n","- **Real-time Configuration**: Ability to experiment with different settings\n","- **Production Patterns**: Understanding of how to build scalable AI applications\n","\n","## ๐Ÿš€ Next Steps & Extensions\n","\n","**Potential Enhancements:**\n","1. **Authentication**: Add user login and session management\n","2. **Document Upload**: Allow users to upload their own documents\n","3. **Chat History**: Implement conversation memory\n","4. **Performance Monitoring**: Add response time and quality metrics\n","5. **A/B Testing**: Compare different configurations side-by-side\n","6. **Export Features**: Download responses and configurations\n","7. 
**Advanced Visualizations**: Show document similarity scores and retrieval paths\n","\n","**Deployment Options:**\n","- **Local**: Run on your machine for development\n","- **Gradio Cloud**: Deploy with `interface.launch(share=True)`\n","- **Hugging Face Spaces**: Deploy to Hugging Face for public access\n","- **Docker**: Containerize for scalable deployment\n","- **Cloud Platforms**: Deploy to AWS, GCP, or Azure\n","\n","You're now ready to build sophisticated AI-powered applications!\n"]}],"metadata":{"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"colab":{"provenance":[{"file_id":"https://github.com/ashisa4e/ai-accelerator-C2/blob/main/Day_6/session_2/assignments/assignment_3b_advanced_gradio_rag.ipynb","timestamp":1762074615788}],"gpuType":"T4"},"accelerator":"GPU","widgets":{"application/vnd.jupyter.widget-state+json":{"f63d9b0bc8d84a7e8c3dde8092748efb":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_fff8a5e6f2584c11ae92669e90149b55","IPY_MODEL_9c45c10affea4bd38fa42d018336a1ae","IPY_MODEL_d6bbb69e7ac245059fc5b43367098035"],"layout":"IPY_MODEL_5d44834a81b049faa99d15b0ddea1822"}},"fff8a5e6f2584c11ae92669e90149b55":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_ve
rsion":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bf96d6969e5f47f488aa470a798ace3b","placeholder":"โ€‹","style":"IPY_MODEL_0fa82b947b7c4a9db115fa0aae66b699","value":"modules.json:โ€‡100%"}},"9c45c10affea4bd38fa42d018336a1ae":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_5968af086e2f45f2b80eb98d9c7d6b07","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_e55ff8ac1ece44fe93396642a8f69cf5","value":349}},"d6bbb69e7ac245059fc5b43367098035":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_70dcd0c31cc343ceac79ee5b9e279d16","placeholder":"โ€‹","style":"IPY_MODEL_30c784db2c7d40d197bba8bb7324e851","value":"โ€‡349/349โ€‡[00:00<00:00,โ€‡38.0kB/s]"}},"5d44834a81b049faa99d15b0ddea1822":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_colu
mns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bf96d6969e5f47f488aa470a798ace3b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0fa82b947b7c4a9db115fa0aae66b699":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5968af086e2f45f2b80eb98d9c7d6b07"
:{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e55ff8ac1ece44fe93396642a8f69cf5":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"70dcd0c31cc343ceac79ee5b9e279d16":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"gr
id_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"30c784db2c7d40d197bba8bb7324e851":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"8faf85a59cb74247bc3d578f9ad72556":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_67a93fb500c6448e87e8cd513af1b84b","IPY_MODEL_4ed87ce306114c69bcb2a8f3ef85d036","IPY_MODEL_fe2896eff96b46b2b1189ef064917933"],"layout":"IPY_MODEL_d97bd850ff314cc99dac0ac1ad538bcf"}},"67a93fb500c6448e87e8cd513af1b84b":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8829a5910f244946adf2acf7adbb0e52","placeholder":"โ€‹","style":"IPY_MODEL_284db2b5bac
74176b9f379065a5b1b8b","value":"config_sentence_transformers.json:โ€‡100%"}},"4ed87ce306114c69bcb2a8f3ef85d036":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_2e69ddccea2544819b62883cb452a4ed","max":124,"min":0,"orientation":"horizontal","style":"IPY_MODEL_4f949c97f4c64f33bf542e5256a47801","value":124}},"fe2896eff96b46b2b1189ef064917933":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8cd06e3730b4407aabb2ac08c1333c97","placeholder":"โ€‹","style":"IPY_MODEL_06f033ed179a455a97b70c76b94d2e5e","value":"โ€‡124/124โ€‡[00:00<00:00,โ€‡14.1kB/s]"}},"d97bd850ff314cc99dac0ac1ad538bcf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid
_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8829a5910f244946adf2acf7adbb0e52":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"284db2b5bac74176b9f379065a5b1b8b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2e69ddccea2544819b62883cb452a4ed":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_ver
sion":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4f949c97f4c64f33bf542e5256a47801":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"8cd06e3730b4407aabb2ac08c1333c97":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null
,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"06f033ed179a455a97b70c76b94d2e5e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0f7d7381db954c048c248ca08413663d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_0b9a45dd94c245a0ba57fffdd5727987","IPY_MODEL_b5816c37f368443db8fb660e5c477049","IPY_MODEL_047a76c191f24bac981c0185c60212fb"],"layout":"IPY_MODEL_f30466d966ea43a79c32811b4947b8a7"}},"0b9a45dd94c245a0ba57fffdd5727987":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_d37c57ea9e174b4b8c2543d32766d0dc","placeholder":"โ€‹","style":"IPY_MODEL_d42d2de19c944348939b82fa7a7a499f","value":"README.md:โ€‡"}},"b5816c37f368443db8fb660e5c477049":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","mod
el_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_575a711d6dde466282b3e28902a4d5aa","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b096c1215b294f84a3a577492614fa8f","value":1}},"047a76c191f24bac981c0185c60212fb":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_60b5b36707dc420280feffe69d8df9a6","placeholder":"โ€‹","style":"IPY_MODEL_146c38e89bd5461aa57f899e8b2d4f5e","value":"โ€‡94.8k/?โ€‡[00:00<00:00,โ€‡7.45MB/s]"}},"f30466d966ea43a79c32811b4947b8a7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"obje
ct_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d37c57ea9e174b4b8c2543d32766d0dc":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d42d2de19c944348939b82fa7a7a499f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"575a711d6dde466282b3e28902a4d5aa":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"
align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"b096c1215b294f84a3a577492614fa8f":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"60b5b36707dc420280feffe69d8df9a6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflo
w_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"146c38e89bd5461aa57f899e8b2d4f5e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3560cb2099bf483b82aa4b9788f85fe2":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_0ac1c922a0284af59cdaf31eef0593c8","IPY_MODEL_1ab1773aa2954ec68989c11522b39d5a","IPY_MODEL_f7001509bce4443fa8a23f2f6df098f4"],"layout":"IPY_MODEL_e0516b54f7c34526bdda4570d41e2e33"}},"0ac1c922a0284af59cdaf31eef0593c8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a2ae6f2393af443e955509461fe5574c","placeholder":"โ€‹","style":"IPY_MODEL_6a65e3300966430da3087f53b7de43af","value":"sentence_bert_config.json:โ€‡100%"}},"1ab1773aa2954ec68989c11522b39d5a":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_coun
t":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_6217eb450c1d4c5086cd9a362cc07fb5","max":52,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c435ed6cc9c142708ccfe7ba230f47d8","value":52}},"f7001509bce4443fa8a23f2f6df098f4":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0607b07be7334929bbba5d7777853fb6","placeholder":"โ€‹","style":"IPY_MODEL_2c7898f5cd0d4bffa96981c66b057349","value":"โ€‡52.0/52.0โ€‡[00:00<00:00,โ€‡5.37kB/s]"}},"e0516b54f7c34526bdda4570d41e2e33":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a2ae6f2393af443e
955509461fe5574c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6a65e3300966430da3087f53b7de43af":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6217eb450c1d4c5086cd9a362cc07fb5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":nu
ll,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c435ed6cc9c142708ccfe7ba230f47d8":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"0607b07be7334929bbba5d7777853fb6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2c7898f5cd0d4bffa96981c66b057349":{"model_module":"@jupyter-widgets/contr
ols","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5cc55270cf5f4fb29f0ae6d5dfa7fd2d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_3500338d17a94911b62f2040e3414abf","IPY_MODEL_38532cc922f24e21880a136c390375f6","IPY_MODEL_21539a7ae6e4444c84787537dd38b033"],"layout":"IPY_MODEL_67ebb27f52ee450f9b57c1c7f7ee9d41"}},"3500338d17a94911b62f2040e3414abf":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_97e80744aa8a43f6ba7bde74b7678bc7","placeholder":"โ€‹","style":"IPY_MODEL_011e63116b8b4f28b7fa86a79e186906","value":"config.json:โ€‡100%"}},"38532cc922f24e21880a136c390375f6":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IP
Y_MODEL_e0a5378ea1f1448a972d29c8c7b6944c","max":743,"min":0,"orientation":"horizontal","style":"IPY_MODEL_4666ff409d9343f0b09ca1b6af423928","value":743}},"21539a7ae6e4444c84787537dd38b033":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3e0a30dd64524579b25a7778930b5918","placeholder":"โ€‹","style":"IPY_MODEL_d995f28420e841dd8aa67faebcac3e8f","value":"โ€‡743/743โ€‡[00:00<00:00,โ€‡80.7kB/s]"}},"67ebb27f52ee450f9b57c1c7f7ee9d41":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"97e80744aa8a43f6ba7bde74b7678bc7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version"
:"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"011e63116b8b4f28b7fa86a79e186906":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e0a5378ea1f1448a972d29c8c7b6944c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items"
:null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4666ff409d9343f0b09ca1b6af423928":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"3e0a30dd64524579b25a7778930b5918":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d995f28420e841dd8aa67faebcac3e8f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyl
eModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ac684e852984462b81d000b8730b2ed9":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_3d953152b1d34d9eab6f2473d8004c50","IPY_MODEL_942410d2ac2b4fe49e889a00738e0fd9","IPY_MODEL_67ff85788637456490392ba903c3e199"],"layout":"IPY_MODEL_2f0ef2b89fa0472fa3c7cafef0de6b14"}},"3d953152b1d34d9eab6f2473d8004c50":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_19eba7a723444c30ac8be9577d838e46","placeholder":"โ€‹","style":"IPY_MODEL_d75f9de26eeb4682a46d3ce413a22e9a","value":"model.safetensors:โ€‡100%"}},"942410d2ac2b4fe49e889a00738e0fd9":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3f8c9aa883634411a70a0169392cbbf1","max":133466304,"min":0,"orientation":"horizontal","style":"IPY_MODEL_5474ba18dfd242b581c80fe3b6d15799","value":133466304}},"67ff85788637456
490392ba903c3e199":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a7f819f13a794657b3c87bf765bcec6e","placeholder":"โ€‹","style":"IPY_MODEL_32f40e38d5e04d9e9a093061e120288a","value":"โ€‡133M/133Mโ€‡[00:01<00:00,โ€‡133MB/s]"}},"2f0ef2b89fa0472fa3c7cafef0de6b14":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"19eba7a723444c30ac8be9577d838e46":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":n
ull,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d75f9de26eeb4682a46d3ce413a22e9a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3f8c9aa883634411a70a0169392cbbf1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":nu
ll,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5474ba18dfd242b581c80fe3b6d15799":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a7f819f13a794657b3c87bf765bcec6e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"32f40e38d5e04d9e9a093061e120288a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5ee691f97b44433694fa9
b3d92cf9b9d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_5d519cb6b7b54072a36fc765c47cd4d8","IPY_MODEL_5cdcb1e596b741ec817de51bdcb3a2c2","IPY_MODEL_2a3d187866bd45e390b2649cf014d530"],"layout":"IPY_MODEL_3e4e3d854fc04d5988224511bba1314f"}},"5d519cb6b7b54072a36fc765c47cd4d8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b7b3765e34754470900d14d92b5981de","placeholder":"โ€‹","style":"IPY_MODEL_187bd0c0d9a54535bbda26a4cb1c554c","value":"tokenizer_config.json:โ€‡100%"}},"5cdcb1e596b741ec817de51bdcb3a2c2":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_8cda645cb6894bb7b50e86f46bcb8550","max":366,"min":0,"orientation":"horizontal","style":"IPY_MODEL_cef23dae0dab4d9bad8d2389ef59bed4","value":366}},"2a3d187866bd45e390b2649cf014d530":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widge
ts/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_cf4cc2efff4c47058932f1726dc91aa5","placeholder":"โ€‹","style":"IPY_MODEL_0110ce6585884566b2288cc38777fc08","value":"โ€‡366/366โ€‡[00:00<00:00,โ€‡37.9kB/s]"}},"3e4e3d854fc04d5988224511bba1314f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b7b3765e34754470900d14d92b5981de":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"
grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"187bd0c0d9a54535bbda26a4cb1c554c":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"8cda645cb6894bb7b50e86f46bcb8550":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cef23dae0dab4d9bad8d2389ef59bed4":{"model_module":"@jupyter-widget
s/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"cf4cc2efff4c47058932f1726dc91aa5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0110ce6585884566b2288cc38777fc08":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"daf78560c8f7432b8c3a17c903315e9e":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/cont
rols","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_417adb1b6b924af0a0a928e0ed85ac47","IPY_MODEL_662b8533613941b88a9add3a92e86ff0","IPY_MODEL_9dfaa0fa00aa4c1ea07a2df46d22db7c"],"layout":"IPY_MODEL_e1e7d73adf3945228ee21e5616e84b3c"}},"417adb1b6b924af0a0a928e0ed85ac47":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4b935025dbba4d9db62661bb2a1967dd","placeholder":"โ€‹","style":"IPY_MODEL_7f62a70857c34e30ad404e99991d07e8","value":"vocab.txt:โ€‡"}},"662b8533613941b88a9add3a92e86ff0":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_10913e7c9e274b329e84e8b7e3e6296a","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_09b91ef09bba405fb57babb479941496","value":1}},"9dfaa0fa00aa4c1ea07a2df46d22db7c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","descripti
on":"","description_tooltip":null,"layout":"IPY_MODEL_699aa016e17a4ec4bedcb0f80d53894d","placeholder":"โ€‹","style":"IPY_MODEL_df772d0a1ccf44a6884d5667108fa78e","value":"โ€‡232k/?โ€‡[00:00<00:00,โ€‡7.90MB/s]"}},"e1e7d73adf3945228ee21e5616e84b3c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4b935025dbba4d9db62661bb2a1967dd":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify
_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7f62a70857c34e30ad404e99991d07e8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"10913e7c9e274b329e84e8b7e3e6296a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"09b91ef09bba405fb57babb479941496":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel",
"_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"699aa016e17a4ec4bedcb0f80d53894d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"df772d0a1ccf44a6884d5667108fa78e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"563da780ce984bd392843d988aa8556d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","
children":["IPY_MODEL_2d433fdd51334661bb1b0b322f68eade","IPY_MODEL_c616936be96f469bbc4d0f6da8ea27c7","IPY_MODEL_8d0c67aedc5d4155b7b5a41f7000d4af"],"layout":"IPY_MODEL_801e6d4e2da94ee6a179964994005e0d"}},"2d433fdd51334661bb1b0b322f68eade":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e766ff5d98e44979bc4716925c888377","placeholder":"โ€‹","style":"IPY_MODEL_1664d1ab377c411bbacb5b6a9066e688","value":"tokenizer.json:โ€‡"}},"c616936be96f469bbc4d0f6da8ea27c7":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_f0046f18629f41f5819c87d95797f0c6","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_d4fcebe817e74bfdadce92f57ddc2bcc","value":1}},"8d0c67aedc5d4155b7b5a41f7000d4af":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ab83e2c2823545b5a55c4bf17849318b","placeholder":"โ€‹","style":"IPY_MODEL_3871bf8d590b49e294bd1d3d163370fe","value":"โ€‡711k/?โ€‡[00:00<00:
00,โ€‡42.8MB/s]"}},"801e6d4e2da94ee6a179964994005e0d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e766ff5d98e44979bc4716925c888377":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":
null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1664d1ab377c411bbacb5b6a9066e688":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f0046f18629f41f5819c87d95797f0c6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"d4fcebe817e74bfdadce92f57ddc2bcc":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ab83e2c2823545b5a55c4bf17849318b":{
"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3871bf8d590b49e294bd1d3d163370fe":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4f478822c02145cfb8d2760c30c884aa":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f2122cbd970945718c7ac59e9c35b3bc","IPY_MODEL_9275ef9315cd4d01ba2f51f3e0382036","IPY_MODEL_56b023c9af1a4ff48e5eaed399b328b1"],"layout":"IPY_MODEL_dc6fd15fdf304f35832ef7d5d
8c9873a"}},"f2122cbd970945718c7ac59e9c35b3bc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_771b4272701046c4a903d2ba63c12b9c","placeholder":"โ€‹","style":"IPY_MODEL_34ad318ff32045e89111e9d28ec3c257","value":"special_tokens_map.json:โ€‡100%"}},"9275ef9315cd4d01ba2f51f3e0382036":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_4e2bf7b0b639459999c015e143301c32","max":125,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c47bd68998a34c02b5a0c0ae6fba1faf","value":125}},"56b023c9af1a4ff48e5eaed399b328b1":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ae43e3f64c264b3290d450eefd79ee99","placeholder":"โ€‹","style":"IPY_MODEL_fd4441c169e746f895c545249be968ac","value":"โ€‡125/125โ€‡[00:00<00:00,โ€‡12.3kB/s]"}},"dc6fd15fdf304f35832ef7d5d8c9873a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_modul
e":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"771b4272701046c4a903d2ba63c12b9c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"34ad318ff32045e89111e9d28ec3c257":{"model_module":"@jupyter-widgets/controls",
"model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4e2bf7b0b639459999c015e143301c32":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c47bd68998a34c02b5a0c0ae6fba1faf":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ae43e3f64c264b3290d450eefd79ee99":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0
","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fd4441c169e746f895c545249be968ac":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"870874be9a3e440f98ee0b13b462c0e7":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_3606e41741d94df8ada91ac7a7a375da","IPY_MODEL_c028ad2ae17541feb74b3b995851e164","IPY_MODEL_a2452ec0dcf54c41bf2bcec5ce260983"],"layout":"IPY_MODEL_901f0526f262469cb8d9bbbd8682e7b6"}},"3606e41741d94df8ada91ac7a7a375da":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_m
odel_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_237286670806486cb055335ffda9d4bc","placeholder":"โ€‹","style":"IPY_MODEL_bd79ec57b44f41d4a4d7870df461f038","value":"config.json:โ€‡100%"}},"c028ad2ae17541feb74b3b995851e164":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_99c4e470958045799ef41046249c4f04","max":190,"min":0,"orientation":"horizontal","style":"IPY_MODEL_f6f9f28b9e144850977722f470a8b2d9","value":190}},"a2452ec0dcf54c41bf2bcec5ce260983":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5ddb850ca36f463c94cbff636ed2b91c","placeholder":"โ€‹","style":"IPY_MODEL_c5d7426c52b04b86a32a38984aee3f7b","value":"โ€‡190/190โ€‡[00:00<00:00,โ€‡19.6kB/s]"}},"901f0526f262469cb8d9bbbd8682e7b6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name"
:"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"237286670806486cb055335ffda9d4bc":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bd79ec57b44f41d4a4d7870df461f038":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleMode
l","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"99c4e470958045799ef41046249c4f04":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f6f9f28b9e144850977722f470a8b2d9":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5ddb850ca36f463c94cbff636ed2b91c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"al
ign_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c5d7426c52b04b86a32a38984aee3f7b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Ashish_Sahu/Day_06/README.md b/Ashish_Sahu/Day_06/README.md deleted file mode 100644 index d314229..0000000 --- a/Ashish_Sahu/Day_06/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Day 06 Submission - -All 4 assignments completed! 
\ No newline at end of file diff --git a/Ashish_Sahu/README.md b/Ashish_Sahu/README.md deleted file mode 100644 index 25a1330..0000000 --- a/Ashish_Sahu/README.md +++ /dev/null @@ -1 +0,0 @@ -# Ashish_Sahu diff --git a/Avinash_Behera/README.md b/Avinash_Behera/README.md deleted file mode 100644 index 942e27e..0000000 --- a/Avinash_Behera/README.md +++ /dev/null @@ -1 +0,0 @@ -# Avinash_Behera diff --git a/CORINTHIAN_CARPENTER/README.md b/CORINTHIAN_CARPENTER/README.md deleted file mode 100644 index e189edd..0000000 --- a/CORINTHIAN_CARPENTER/README.md +++ /dev/null @@ -1 +0,0 @@ -# CORINTHIAN_CARPENTER diff --git a/Chandra_Sekhar_Yandra/Day_7/assignment_1_vector_db_basics.ipynb b/Chandra_Sekhar_Yandra/Day_7/assignment_1_vector_db_basics.ipynb deleted file mode 100644 index 2b0c149..0000000 --- a/Chandra_Sekhar_Yandra/Day_7/assignment_1_vector_db_basics.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"sFRtRG5BY7X-"},"source":["# Assignment 1: Vector Database Creation and Retrieval\n","## Day 6 Session 2 - RAG Fundamentals\n","\n","**OBJECTIVE:** Create a vector database from a folder of documents and implement basic retrieval functionality.\n","\n","**LEARNING GOALS:**\n","- Understand document loading with SimpleDirectoryReader\n","- Learn vector store setup with LanceDB\n","- Implement vector index creation\n","- Perform semantic search and retrieval\n","\n","**DATASET:** Use the data folder in `Day_6/session_2/data/` which contains multiple file types\n","\n","**INSTRUCTIONS:**\n","1. Complete each function by replacing the TODO comments with actual implementation\n","2. Run each cell after completing the function to test it\n","3. 
The answers can be found in the existing notebooks in the `llamaindex_rag/` folder\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"aNWKdUCoZIVZ","executionInfo":{"status":"ok","timestamp":1762067996807,"user_tz":-60,"elapsed":1303,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"20c326b2-8481-4d2c-c59c-ba6ef6473983"},"execution_count":3,"outputs":[{"output_type":"stream","name":"stdout","text":["Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"]}]},{"cell_type":"code","source":["!pip install -r \"/content/drive/MyDrive/OutSkill/session_2/requirements.txt\""],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"wWFuPqeEZpV8","executionInfo":{"status":"ok","timestamp":1762068036366,"user_tz":-60,"elapsed":35758,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"bc7c226f-0eab-411a-865a-cecec7822351"},"execution_count":4,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in 
/usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 5)) (0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 15))\n"," Downloading 
llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m35.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m19.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from 
google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from 
gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: 
pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from 
huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting 
llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in 
/usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in 
/usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in 
/usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata 
(1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in 
/usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in 
/usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in 
/usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in 
/usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from 
aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: 
greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from 
markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m44.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl (4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m88.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m60.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading 
llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m110.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m30.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m11.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m94.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m10.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m32.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 
kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m25.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m15.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=30e2cc26d083dcbb764e10c17a6c773bec2b0abf527b83570eace38404b129d9\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 
llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"ff0ed4b6ad5b43c1b3fd3c5e0ddc3671"}},"metadata":{}}]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"id":"LRt2SxvAY7YH","executionInfo":{"status":"ok","timestamp":1762068079334,"user_tz":-60,"elapsed":28310,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"ca88b692-8ade-428b-d3b1-43fe9571286e"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Libraries imported successfully!\n"]}],"source":["# Import required libraries\n","import os\n","from pathlib import Path\n","from typing import List\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","\n","print(\"โœ… Libraries imported 
successfully!\")"]},{"cell_type":"code","execution_count":6,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":404,"referenced_widgets":["9e50478b746f4095b7c7e584fcb3088d","fcae1aa0d3e84a178df935f375c93e4a","6f741c697a2f4a06acdfad95ee5457d0","266528d476924186adcea12699d9a7cd","fede8bdc559749cb81d54c180bb406b1","7637d0bb79344cfb9570b9450165e0d0","9e84fc487d57420c9f860ce017383908","e5340745d1934ae9b22ab43a69610398","0b9f6855fe2d45768b93a386c7a25da4","acc90bd33aa144c3b0674c89edf6b94c","aa4325add78b4b8197eba8174b57918d","3a40f9562a364c0c8b7a7187cd2caf51","0c7c3c4f417c49dabd74742ae365647f","cf14d4e0bfc340f3856dcece2982ba63","1b2f883821cf41c6918201b4deef7ceb","4f3e33c0312b40baa3113af9259966cf","bcac30ed6a7e4c79bb442a7cc4cd2457","7a6d9d888fc64026b6483f528ba84cb9","c79c380255ff4c58bbfbbd934d8d2743","fa3d45b5e2074ffaa3f32ab9437414ce","e2480e0d445a4409b4977e82f40d2481","ce8321a911b84217840056979cc6d41e","eb035deef8614a3b8d433091ae253b9a","f2000bf68e634eca8c7430bfac1a7900","db756719baf84343b743d1e3aaf98338","f43baba407114a278689435b617a8210","cd07ef1676cd49d8a645ef51bc3cf458","cabc44983edd497194caf9b44f1718e9","c8ce4fa3da1e464d8cd73b5b70c0125b","a4f1c98204024e5792895bc8265fb2bf","f40edab5b5aa49abbb319bb4b59e5842","e6e476193b8b43eca83acc1a02286f6e","5b33d26d9ad54326a9d0c7ac77b8fbb7","9501ea4e0ece426abb114815a95b9001","a431f56dd056401ebae43404494de811","4832fe0c25644238adb50317f004097e","9073396ffd41469e9cbcf325d2d505ce","bf14eed27ab1454b99a1022e909b0baf","e4a7de2364184104bededc2017ad6b42","0c865a56206c4324bb10d0fdad0b43f4","93fe7af989da4faaac2e397ac5b94122","e873fbb7e8a446bb818c49399492b813","64003df16f9844589932c51bc9a4f5a7","690ddc29ac104de7923dcb7c80878e99","dec120f4d2404ef9bc9cccb3f8ab515b","c4a53fcfb8bc45bd8e18c2137807c676","0aa98de2f80b4aac8468275cb5dc8eae","7ea01404bc31440ba376fa0dc700204d","f81aa3d28ad54a14a15c2d9468f22825","87403a24e98d413da8407284eac4a46b","db145f2a0e504b3daf7a365a5b65b49f","01d8c63b060148cb9a8d7f9683c64218","9665b9e748684749818c68beb2
e608da","f2ac459fb4304d4099e052c2b2bf6127","ee3bfa4c356c4bc8bc6437b2eb74309b","ca805584cacd406c890f91744c9813dc","9d8182156e6b41a38749498f67f27171","fb612d03164a41619950513ef7bd77f8","6188f928c0f3437680b560568045ae0a","296c202a6892462e9b9cbbfd3f82dfc8","16c6d277c9794285aea4a361fbecc0fe","f8431fd10df24c0db43759f2075af8aa","d67b9c2da15d48b18b5bf31e827b843e","4a615c94f5f04528afb91c65b56bebc8","2572960d47ed4e94a3cd09e83b9c57f6","6691a3ea1e064b69a9654b351f55cbd5","b3d20a576ede4d4e907ffef4cb30910d","8f84afe876454077b0d7472de3c424f4","4a3e812dc1e04ff593f93abad144e715","317a77986c504394848a588dfe6d421a","68f86cda0bd4417d8673000efce5ca8d","3f7032e473e14141b81b2e597dd097ea","ddb825ce1db94ab7a104d5746e71227b","20861a3550d748308726969ee00554a3","6ab139427bc14eae94f530d49b4eab26","e8de95e8ee784964b713bdb2b1d29a27","99acb52b4db844c78b3d1bf13e201231","de8e30b1d94c40708300f77e09d9d370","c2237db384ca45069bbf04ebaa7d100b","ee1b3eedaee3479692e8a68a86ed8d7b","75a18e0f2a4e4cdfbef4de801756eae2","6c0cd4ad2a2c454faf449e694dfec6c5","d420397f38dc4f329195abf874519af1","40b3b6ae96544e86b23d92e28bed9371","7157841591e646c68b62e60ea12e6f3e","874716ab3d48462096bcf1a0cf6ed919","fe163bf8e81343219c97984a6373716e","e303dce9acb541a28743b0658092de9b","dbba246cd9594cf7ad71b45ea62d1260","1a54265153374d62a42bac6b65aa766f","34a0bbc2733045ddba99c651274e1570","56df6e9fe06246c6aab4ff43f24c86c4","88282974155e4af0901e8093c679f0bf","b1d44fb0695643a68b9b0f63a77af558","40f54b3901ad4f6d818ec3ba9298e499","88994767a12e4fd6a360353e1ad7bb75","aa0c3fe4ee0c43b79a71c9e429e06111","7d145a28620d48098fff5033388b45a8","7d789e86d75b42a2b6ac43bcd558168e","0cdb6d4786094a359bb7a2df1dc3b459","6bdc685ac8a0414e81158b175d0ffe32","5a626d6624974a4f9afa7f5e139f69c3","180479f0a5b94ed6980cc7c1a3c1d768","b7274802db8440aea7c44f55e9d1a46a","c46c3b5e41ca40c0953130d1e699eb6b","41c56c7b53c64cb480ea8b6e705931b9","30fb794c1b7b453da1896b60fa6a33b6","bf508a6ec087482c80513816464230ec","41852112c8a44d128cf5583228d699f2","6ed46a3904344d439943ac7b4c79ebf
1","b25c771f5fe34a2ea04080133cb8fd49","f680ee6d00ce416a893210b9c6a2d930","5f5c572189fd44508da2a3e42121ea6d","d37f1a35390646299091f1a6e6658171","9beeeee4e141477da1c371460a9e3612","5f736b49c96e4af1b2cf72347fcf4e90","40c27a87873c4251b0f41166182ad36d","26eba292ff04486489b45dcf38c4a535","3fa16de04526481c9b5cad38b13655ee","bee28b3d7acf400287b28b48514566e9","6e941127f5ec4a2cb31e470980ce72f5"]},"id":"MOQQQ5jVY7YK","executionInfo":{"status":"ok","timestamp":1762068344803,"user_tz":-60,"elapsed":19144,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"9386dcbf-ec46-418e-cdf5-592826dee730"},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:001.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) 
(2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already 
satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," 
Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in 
/usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," 
Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from 
llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from 
aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already 
satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. 
If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m 
\u001b[31m48.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl (4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m106.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m66.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m119.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m 
\u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m23.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m18.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m86.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading 
llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m27.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m3.9 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m13.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=73569e0d417072ce261752c5f6d2130080eefbe3a6a57e864d37394b37501036\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling 
setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"f8559e4712d14d648fb152b9dd9daed4"}},"metadata":{}}]},{"cell_type":"code","execution_count":3,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"RHjy57aMp0dZ","executionInfo":{"status":"ok","timestamp":1762071826455,"user_tz":-60,"elapsed":40962,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"624f5dd4-37f1-4d50-95e2-809e67ecc90b"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Advanced RAG libraries imported successfully!\n"]}],"source":["# Import required libraries for advanced RAG\n","import os\n","from pathlib import Path\n","from typing import Dict, List, Optional, Any\n","from pydantic import BaseModel, Field\n","\n","# Core LlamaIndex components\n","from llama_index.core import 
SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.core.query_engine import RetrieverQueryEngine\n","from llama_index.core.retrievers import VectorIndexRetriever\n","\n","# Vector store\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","\n","# Embeddings and LLM\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","# Advanced RAG components (we'll use these in the assignments)\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n","from llama_index.core.output_parsers import PydanticOutputParser\n","\n","print(\"โœ… Advanced RAG libraries imported successfully!\")\n"]},{"cell_type":"code","execution_count":4,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":456,"referenced_widgets":["250a8a57997e40b1af60337b111bf3f2","fe5d7479ed8c4604ab076c43a2c3301a","d2837079c89743eaa90ac044e2d4802e","9b3960ec4531465aba1240bf0c05ebca","8fd5ed4dd15744d9af4303a62feb3c4a","62061bdef37e4ed292c10673c599fbf8","e1b56f9fcbc246f6bdfebb3103e6c10b","a7737a716b124197b207f0047f962202","4ed32cbe8ab6499984af7bc0fbc7b1c4","010f1ff04ca145bbbb6df50363ce0ed6","6465dbe3b5a743e3afc4dbe88c189028","40446d532ec2429b8bd67d40dc57e0c3","e747fcef48974f49942e0f09b2b90c28","ac1b08e966d34e5c92fbcc569e1bcec2","bedb5daa3d8643a3bca8914ae1a42c3a","b4c0bca8ab814504b04d61b5f6985fd9","4f7611bca67647b9963c7454311f5157","c5e2eae62ff348f380835d8f4f4d839e","c17bf854c7a74086a5e01fb5ed424bfc","7e4c6e64e7eb4bf48fdab884a05c5300","19d9f33582a84edaa4777ec4ea211b02","9c7ce0f82e2a4114acd3d321af3d50d6","c39ada9041824de2826379bc89b210fa","b9553b1d398445deb216a4f5d5fdf817","9d2f24d0c49747e78d4c160205ec17f1","cb352f5e8cc2454887651511d44de524","94717defcba847fdb1ac7bd5561b8b4a","782c3c7a0c6c4bc585af78faf3fee121","5b552a72db0a4a17b1816030ed955ce4","f5caa0280cbd45fda32de
11e163663a5","1ae16feb180d4b06b01e6c4e8ac93ab9","2bf6a69f039d46b8ac90a326d00acd34","35d1691185554cee8d924f02310c8e88","543b62ef07b046b485d35cbe3cc0c59a","af8e8b7b07524c44ba4eccab43f4fe54","2bc3c42d6d87429e91c96629eea7afac","73e309d88e0646f18ea42b82e4b49d33","f3151e5e1ebe4034927b5b7a977a8c31","449ee5c153a248428549413bed6b2f05","7f9411ffba484c3daee0e79837922a0d","211b57bf680b4d719569305318a76da6","cfb20360190a4c9685bfad8225f2d6e8","f0c1c373c1844ccd8917f23a89001f11","7ae4663be2ba4b659bba029a8cdb85fe","a9051fb3c78940988271e037a74ee9fe","839b7c687c0f4a058bf7c1fc156a3699","7b7f2b12432542308bccc9f6b1d96093","c89e8d6132bb4377b87a094a67460336","9efe9109d52b4472a026f2d096d9b3ab","6d4c3620b8af465fb7f1b28986f9339e","cf24500ad04a4852b55008c0b0969f8c","d93c66f95f8d4a189fe192fd6e7f1833","6fca862f50084af8a940697d81d051a4","32b35792efb146f7bf7a83c7933f4704","5990f32636604246b1e5756aa18ca57d","e2ffd2a431b043e8b2848d1bc2207b79","889ebf92c144476583fa0158275246f0","329b27c39c7449158111f64dd9fe7f20","328ed6b43aca4502a8b95d48a82937fd","b14c56f247464634abf3f99ee55d8b46","f4cbb7241f8940f7a41b362aa1c8ed2e","f4f8a2a617a74cb4b204a49de6c3961a","354e7658c8a8462eba1f8edec241b3d4","10d0cd07bc304259bc796a6d74c87897","9176fe889bdc4f06b1fca7839ea54a2c","eca212b3b5f442778d48b10d95e10b56","b6ba91ae17e14726a8f436c93a5cc471","a372fcea943646f0a956b7a07b2d5176","5116a193da1d4ffdacf69b93a6603ab6","24f3558178d4466cb0053e47bcda307f","f7a6054c2f854cdda58c5f6495cee06c","81ac32c3eece4666b08896fcd6af5278","9d283d5058084f83a7272253f41b5c23","7873594c28c841f0be9de3332e43de55","7ae6ceb6adae4fba9e6b0c46fbbc84e7","c93ded6ef98a48cb9708e700bf01eb18","1b8524f2c93448ab8630ac9a6510d3ae","be0584c80c994258bd21a59eb03257f3","4fb2d2c108734a2eb362fb3f7b3601cc","54e4310bd06441b5827ce5e1d62d098b","bbf7ada154234db88e2c3dbbdee2aa2a","acbd7751bfe740b9aea1c741523bac10","a34e5796da704094bff4e903498adf5e","fb3cd114f3104c248941d0a856f3262c","76b908639f664c43ada9539e61fd1366","6c3310d795d04bee989be7cc26594861","9a78d1c981d14805a2e08c160c
90ace6","87e5d52f098b476a84c75318017776a0","dbbb15017f9945acb95b710a6303053b","a0e0c3aece5b443ba0c271faa0bdcd32","08b5f2506f474389aba1340203017785","cebf94d866dd4def81b5c669ee80db3e","188e3e21ff21413e9916589c03cfff69","07f8b4a97ffb4ef3bc0c85a2cffb9b9d","bb1f0410a1284f2486c7e008d5c125b1","55208d0538864a9d988936d4135d52a3","4e946f44dfba4c36bdbd92261c869fa1","d58006e0be3d4ae280566d7eea4de438","6bb40b76730745edb9318dc976784c35","979ca28393514c1f88ee5640afa62a73","13e6fe6837f94e719e038e4333dfa2b4","bb53e50d2457443883979f4aaf15b21a","5f44c2439ce140bfa66856518a451292","007ea67f0f334b2fb3af858dee4c9800","27383fa76dc64b93a79852dc08828997","851c60ad0ec44cf2a4f7e5f457654401","9433baa913de4d2ebf2eb20c1e503b36","8b73283803bc4fb1873a24ebb1fd8d98","07b2d6a5188d48d3bd4160a2f5cbbfe0","07573f7409c3408396ae6d1630c130aa","44f64356057343d7b2e47cd2adab85e3","7455c82cf2654e7394fdb2dbc1919e6e","9195df8563ca4f4ea568af4eab59749a","f3c80211a6554926bbae7a197a8717fd","bf6053ed97ab4d94997c59c54c8b75b3","afee44891e754ca99b4c0cb4c169a7a0","b1ba5847c11b413ea8c7ab6782c453d2","389fd3106a9b4d7c8b6b31202d850cc4","710f08624e284fc59479d9704637cc69","72fa1764bab94fe1906593df02f20d39","0c88136e3643497892b0be726c918ff4"]},"id":"gBrSdnmVp0db","executionInfo":{"status":"ok","timestamp":1762071875287,"user_tz":-60,"elapsed":11727,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"a23869dd-483b-4b1a-b8f7-f8d0e3a2b05c"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… OPENROUTER_API_KEY found - full advanced RAG functionality available\n"]},{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:001.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 
1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 
in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) 
(25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in 
/usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already 
satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) 
(2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in 
/usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in 
/usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already 
satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting 
dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 
kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) 
(1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement 
already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) 
(0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from 
aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 
kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from 
markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m35.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl (4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m107.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m78.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading 
llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m104.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m31.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m21.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m86.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m9.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m35.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m10.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 
kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m19.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m4.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m17.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=0f5a4de29dffc46b8f7f0fd1dafd520e0656eca3514cf1f01e56e4f45688dadc\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 
llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"442ba082712446c8bf79819d5e710a1a"}},"metadata":{}}]},{"cell_type":"code","execution_count":13,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"N85eIvmV2FHc","executionInfo":{"status":"ok","timestamp":1762076402949,"user_tz":-60,"elapsed":56,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"838448a5-6112-48eb-f6e4-eb53c6564471"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… All libraries imported successfully!\n"]}],"source":["# Import required libraries\n","import gradio as gr\n","import os\n","from pathlib import Path\n","\n","# LlamaIndex components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","print(\"โœ… All libraries imported successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"4WFlGcOo2FHd"},"source":["## ๐Ÿค– Part 2: RAG Backend Class\n","\n","Create a simple RAG backend that can initialize the database and answer 
queries.\n"]},{"cell_type":"code","execution_count":10,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"Q2PnOP572FHd","executionInfo":{"status":"ok","timestamp":1762076161932,"user_tz":-60,"elapsed":3793,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"3710a352-22cc-47e2-ccde-f1033c74432f"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐Ÿš€ RAG Backend initialized and ready!\n"]}],"source":["from google.colab import userdata\n","class SimpleRAGBackend:\n"," \"\"\"Simple RAG backend for Gradio frontend.\"\"\"\n","\n"," def __init__(self):\n"," self.index = None\n"," self.setup_settings()\n","\n"," def setup_settings(self):\n"," \"\"\"Configure LlamaIndex settings.\"\"\"\n"," # Set up the LLM using OpenRouter\n"," api_key = userdata.get(\"OPENROUTER_API_KEY\")\n"," if api_key:\n"," Settings.llm = OpenRouter(\n"," api_key=api_key,\n"," model=\"gpt-4o\",\n"," temperature=0.1\n"," )\n","\n"," # Set up the embedding model\n"," Settings.embed_model = HuggingFaceEmbedding(\n"," model_name=\"BAAI/bge-small-en-v1.5\",\n"," trust_remote_code=True\n"," )\n","\n"," # Set chunking parameters\n"," Settings.chunk_size = 512\n"," Settings.chunk_overlap = 50\n","\n"," def initialize_database(self, data_folder=\"/content/drive/MyDrive/OutSkill/session_2/data\"):\n"," \"\"\"Initialize the vector database with documents.\"\"\"\n"," # Check if data folder exists\n"," if not Path(data_folder).exists():\n"," return f\"โŒ Data folder '{data_folder}' not found!\"\n","\n"," try:\n"," # Create vector store\n"," vector_store = LanceDBVectorStore(\n"," uri=\"./basic_rag_vectordb\",\n"," table_name=\"documents\"\n"," )\n","\n"," # Load documents\n"," reader = SimpleDirectoryReader(input_dir=data_folder, recursive=True)\n"," documents = reader.load_data()\n","\n"," # Create storage context and index\n"," storage_context = StorageContext.from_defaults(vector_store=vector_store)\n"," self.index = 
VectorStoreIndex.from_documents(\n"," documents,\n"," storage_context=storage_context,\n"," show_progress=True\n"," )\n","\n"," return f\"โœ… Database initialized successfully with {len(documents)} documents!\"\n","\n"," except Exception as e:\n"," return f\"โŒ Error initializing database: {str(e)}\"\n","\n"," def query(self, question):\n"," \"\"\"Query the RAG system and return response.\"\"\"\n"," # Check if index exists\n"," if self.index is None:\n"," return \"โŒ Please initialize the database first!\"\n","\n"," # Check if question is empty\n"," if not question or not question.strip():\n"," return \"โš ๏ธ Please enter a question first!\"\n","\n"," try:\n"," # Create query engine and get response\n"," query_engine = self.index.as_query_engine()\n"," response = query_engine.query(question)\n"," return str(response)\n","\n"," except Exception as e:\n"," return f\"โŒ Error processing query: {str(e)}\"\n","\n","# Initialize the backend\n","rag_backend = SimpleRAGBackend()\n","print(\"๐Ÿš€ RAG Backend initialized and ready!\")\n"]},{"cell_type":"markdown","metadata":{"id":"ugsHJI_S2FHe"},"source":["## ๐ŸŽจ Part 3: Gradio Interface\n","\n","Create a simple Gradio interface with:\n","1. Button to initialize the database\n","2. Text input for queries\n","3. Button to submit queries\n","4. Text output for responses\n","5. 
Text output for status messages\n"]},{"cell_type":"code","execution_count":11,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"dq8_-KnS2FHe","executionInfo":{"status":"ok","timestamp":1762076181061,"user_tz":-60,"elapsed":182,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"d4187e4d-b60b-4365-afde-02161e3c6719"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Basic RAG interface created successfully!\n"]}],"source":["def create_basic_rag_interface():\n"," \"\"\"Create basic RAG interface with essential features.\"\"\"\n","\n"," def initialize_db():\n"," \"\"\"Handle database initialization.\"\"\"\n"," return rag_backend.initialize_database()\n","\n"," def handle_query(question):\n"," \"\"\"Handle user queries.\"\"\"\n"," return rag_backend.query(question)\n","\n"," # TODO: Create Gradio interface using gr.Blocks()\n"," # Hint: Look at the structure below and fill in the missing components\n","\n"," with gr.Blocks(title=\"Basic RAG Assistant\") as interface:\n"," # TODO: Add title and description\n"," # Hint: Use gr.Markdown() for formatted text\n"," gr.Markdown(\"Initialize the database and ask questions about your documents.\")\n","\n"," # TODO: Add initialization section\n"," # Hint: You need to use gr.Button to initialize the database\n"," init_btn = gr.Button(\"Initialize Database\")\n","\n"," # TODO: Add status output\n"," # Hint: You need to use gr.Textbox to display the status\n","\n"," # The connection between the button and the status output has already been implemented\n"," # at the end of this function\n"," status_output = gr.Textbox(label=\"Status\", interactive=False)\n","\n"," # TODO: Add query section\n"," # Hint: You need a text input, submit button, and response output\n","\n"," # Use gr.Textbox to create a text input\n"," query_input = gr.Textbox(\n"," label=\"Your Question\",\n"," placeholder=\"Enter your question here...\",\n"," lines=2\n"," )\n","\n"," # Use gr.Button to create 
a submit button\n"," submit_btn = gr.Button(\"Submit Query\")\n","\n"," # Use gr.Textbox to create a response output\n"," response_output = gr.Textbox(\n"," label=\"Response\",\n"," interactive=False,\n"," lines=5\n"," )\n","\n"," # Connect buttons to functions\n"," # Uncomment when above is implemented\n"," init_btn.click(initialize_db, outputs=[status_output])\n"," submit_btn.click(handle_query, inputs=[query_input], outputs=[response_output])\n","\n"," return interface\n","\n","# Create the interface\n","basic_interface = create_basic_rag_interface()\n","print(\"โœ… Basic RAG interface created successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"YqWRp3Kt2FHf"},"source":["## ๐Ÿš€ Part 4: Launch Your Application\n","\n","Launch your Gradio application and test it!\n"]},{"cell_type":"code","execution_count":12,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":906},"id":"8SBScZ272FHf","executionInfo":{"status":"ok","timestamp":1762076189464,"user_tz":-60,"elapsed":2591,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"e99430a3-3fbc-4657-c743-c7b3857da668"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐ŸŽ‰ Launching your Basic RAG Assistant...\n","๐Ÿ”— Your application will open in a new browser tab!\n","\n","๐Ÿ“‹ Testing Instructions:\n","1. Click 'Initialize Database' button first\n","2. Wait for success message\n","3. Enter a question in the query box\n","4. Click 'Ask Question' to get AI response\n","\n","๐Ÿ’ก Example questions to try:\n","- What are the main topics in the documents?\n","- Summarize the key findings\n","- Explain the methodology used\n","\n","๐Ÿš€ Launch your app:\n","It looks like you are running Gradio on a hosted Jupyter notebook, which requires `share=True`. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n","\n","Colab notebook detected. 
To show errors in colab notebook, set debug=True in launch()\n","* Running on public URL: https://916b8cbf2903c7eb96.gradio.live\n","\n","This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"]},{"output_type":"display_data","data":{"text/plain":[""],"text/html":["
"]},"metadata":{}},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":12}],"source":["print(\"๐ŸŽ‰ Launching your Basic RAG Assistant...\")\n","print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n","print(\"\")\n","print(\"๐Ÿ“‹ Testing Instructions:\")\n","print(\"1. Click 'Initialize Database' button first\")\n","print(\"2. Wait for success message\")\n","print(\"3. Enter a question in the query box\")\n","print(\"4. Click 'Ask Question' to get AI response\")\n","print(\"\")\n","print(\"๐Ÿ’ก Example questions to try:\")\n","print(\"- What are the main topics in the documents?\")\n","print(\"- Summarize the key findings\")\n","print(\"- Explain the methodology used\")\n","print(\"\")\n","print(\"๐Ÿš€ Launch your app:\")\n","\n","# Your launch code here:\n","# Uncomment when implemented\n","basic_interface.launch()"]},{"cell_type":"markdown","metadata":{"id":"J8_vObDT2FHf"},"source":["## โœ… Assignment Completion Checklist\n","\n","Before submitting, ensure you have:\n","\n","- [x] RAG backend is provided and working\n","- [ ] Created Gradio interface with required components:\n"," - [ ] Title and description using gr.Markdown()\n"," - [ ] Initialize database button using gr.Button()\n"," - [ ] Status output using gr.Textbox()\n"," - [ ] Query input field using gr.Textbox()\n"," - [ ] Submit query button using gr.Button()\n"," - [ ] Response output area using gr.Textbox()\n","- [ ] Connected buttons to backend functions using .click()\n","- [ ] Successfully launched the application\n","- [ ] Tested the full workflow (initialize โ†’ query โ†’ response)\n","\n","## ๐ŸŽŠ Congratulations!\n","\n","You've successfully built your first Gradio RAG application! 
You now have:\n","\n","- A functional web interface for your RAG system\n","- Understanding of Gradio basics and component connections\n","- A foundation for building more complex AI applications\n","\n","**Next Steps**: Complete Assignment 3b to add advanced configuration options to your RAG interface!\n"]}],"metadata":{"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU"},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Chandra_Sekhar_Yandra/Day_7/assignment_3b_advanced_gradio_rag.ipynb b/Chandra_Sekhar_Yandra/Day_7/assignment_3b_advanced_gradio_rag.ipynb deleted file mode 100644 index 6d570c0..0000000 --- a/Chandra_Sekhar_Yandra/Day_7/assignment_3b_advanced_gradio_rag.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"ogVD2bjH7nCP"},"source":["# Assignment 3b: Advanced Gradio RAG Frontend\n","## Day 6 Session 2 - Building Configurable RAG Applications\n","\n","In this assignment, you'll extend your basic RAG interface with advanced configuration options to create a professional, feature-rich RAG application.\n","\n","**New Features to Add:**\n","- Model selection dropdown (gpt-4o, gpt-4o-mini)\n","- Temperature slider (0 to 1 with 0.1 intervals)\n","- Chunk size configuration\n","- Chunk overlap configuration \n","- Similarity top-k slider\n","- Node postprocessor multiselect\n","- Similarity cutoff slider\n","- Response synthesizer multiselect\n","\n","**Learning Objectives:**\n","- Advanced Gradio components and interactions\n","- Dynamic RAG configuration\n","- Professional UI design patterns\n","- Parameter validation and handling\n","- Building production-ready AI applications\n","\n","**Prerequisites:**\n","- Completed 
Assignment 3a (Basic Gradio RAG)\n","- Understanding of RAG parameters and their effects\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"cfHIdWMs7tTG","executionInfo":{"status":"ok","timestamp":1762076341514,"user_tz":-60,"elapsed":18309,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"24939c85-7eff-4c16-d57d-ed87c744dcca"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["!pip install -r \"/content/drive/MyDrive/OutSkill/session_2/requirements.txt\""],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"zY8YFmpY8bTw","executionInfo":{"status":"ok","timestamp":1762076560885,"user_tz":-60,"elapsed":52717,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"a0275112-bd4c-4ca1-ed2f-af8006659892"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 5)) 
(0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m12.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from 
google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from 
gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: 
pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from 
huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting 
llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in 
/usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in 
/usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in 
/usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata 
(1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in 
/usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in 
/usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in 
/usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in 
/usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from 
aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: 
greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt 
(line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r 
/content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from 
markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/OutSkill/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m43.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl (4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m89.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m65.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading 
llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m105.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m17.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m2.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m11.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m52.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m21.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 
kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m15.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m3.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m10.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=5f26e4130eee03c7d6ed12e783b01330b66ce188c05916c0fe0535b83a421b04\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 
llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"85674f4ebc3a48b9b80cf522b5e6fd96"}},"metadata":{}}]},{"cell_type":"markdown","metadata":{"id":"KJ9lla2n7nCU"},"source":["## ๐Ÿ“š Part 1: Setup and Imports\n","\n","Import all necessary libraries including advanced RAG components for configuration options.\n","\n","**Note:** This assignment uses OpenRouter for LLM access (not OpenAI). Make sure you have your `OPENROUTER_API_KEY` environment variable set.\n"]},{"cell_type":"code","execution_count":3,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"LeCEGjPf7nCV","executionInfo":{"status":"ok","timestamp":1762076644694,"user_tz":-60,"elapsed":45388,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"8278d94c-5537-4ff1-d132-329ecb4cbd4c"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… All libraries imported successfully!\n"]}],"source":["# Import all required libraries\n","import gradio as gr\n","import os\n","from pathlib import Path\n","from typing import Dict, List, Optional, Any\n","\n","# LlamaIndex core components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","# Advanced RAG 
components\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n","from llama_index.core.retrievers import VectorIndexRetriever\n","\n","print(\"โœ… All libraries imported successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"xHfjatCc7nCX"},"source":["## ๐Ÿค– Part 2: Advanced RAG Backend Class\n","\n","Create an advanced RAG backend that supports dynamic configuration of all parameters.\n"]},{"cell_type":"code","execution_count":4,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":386,"referenced_widgets":["8604264dc91244e5b220bd7c4eafde65","4f05b7fa10294e418ab83a4a9682de97","9efbbe79459f4cb380a9385d04602190","5306981fa6014905af10c1ca496ae79e","4bd337fcf32e40a995de3d700d7076e2","478e9999f2d6403b90dd357d63892394","7d09c392935f4ae68cff11825658d956","77212086fccc45a08d90a4d3019f81f8","cad089b3d1c14245a5f5f27c3f883995","157de0c0e4d84630b1f68a541670406a","7d4749a8eef64690b258a1ec2f4f0a5d","6975ef41f4fb4048a9e7750e58928842","5c8793d95ca74111bab066d33025a6cf","3b2835809bec43c581c4f698faaf1684","a8ada46429304fb3a7a967166d81a39d","8f904038be004972b57dd19267996517","7fbb2bfcf123433eafd300f189a377e3","c61a9ba7cd684f2a8e440b0ba0489423","59fc7367fa14442685cd55259353af14","15ac214dfeab4c108859d9a866f28ec3","a53da8f930bd4a9389e5784ca936f327","b2c16eda91864c7b9f4829c5de52306e","e7c27024bbe94d7789644b3d5a590416","ff97ae78d78f49789d6d0cd0a83b4262","fa92ad4e8ff5422791620e320962a3b9","28b3e31f74bf41a796ae0a5e182ee796","97229b2177a640ed926dfc41e079555d","f3d7b39af9d140bbbca7ba03b5e388a9","c67db995ba5645baa7badc7278a0fa79","a4fad66eb18646198119d009f11dc1b0","9a34463da8644b0b8c60ca7f6cdf084c","69f5656f8acf415c89fbf35dd5687ec6","c4338b3bffd445b5b4741dffbe35b8d8","2aeb1bc3e00b4f4eb807e73ab7673780","e53050cee4ca470ab17369df159381f6","0309865d6fa1482f9a9040a4c88d230d","245117ffacca475782b2d83d12dcb1de","55adf966a1d2425faa0798df1e4b64ed","938deff65288463c9
c4d5661c16cd49d","bc048cedbc0c400abc416dff11670bb1","f632c393f14a451288e3b268f165204c","df81392c77974cb9b662d95bf2466bea","0b95a5b41d3e4002ab447186f74347c9","a671a07a004a4ab185a8a5d00e4d4fa7","9f6c9540c9054609aaa0552d02f617de","6c68b0f35cc94529aa85594f01e6661a","6292d1404e464ba094b8d8f2ea0b568e","3bad25e1c4664f7bb8ae6c2395e9971a","adc132e026054fe9bdc7f7aedef2ea63","5a6d101f035e4a609b9851ce5e5429af","d9d256b2944848aa8b79ea6c6b2f4119","5c61221cc07e4aca819c6ac7f3ed66d1","a5e2f42365e048319118168576a3ce21","3096e2e856f44dbb87d3ea29a8ba683c","782fdf488dc14f24864990c98844d1b2","38c9b3a98c354c8aa5efa7886b367cbf","b50d144ee700417cbcbccf38344243d4","accf0eb5d2334426b82695982c50c668","4956a5b3cd41447db2f056f1df27c464","e2108e00def44b118c2340b2d8e04158","76247a77e1cd4890a6eb7404680a2076","d6ae552768e242c9bf216b45c787fb46","c44bc8795a31454ca287c7c2aa32b421","c4317b5ae1a04ec3ba87783c44500605","b971d8a392f54127909469183eddd2e8","700bf7d497ab478a90c5f05a80bf41a8","6967529dacc94d93bd61758210fd272e","9748c5335e714b4cae231a8e282eaec5","ea8ae967e70940678afde0d3ab0dee1f","c138a6fa38af473b82b0a92c3306d713","018d9c061ad1460aaf0142f7c02f2db1","e0e277e418d44f09b849f9e73f4987ae","7637569217f844d48757fea4d5f68dde","a6b94e17d58642989fc52e557c4d524b","02c4ffb48da641afb95c555a090d8738","42279ff6bd8c468d9923590245f12355","b8f5b8519e0948d79c30fea7d73a36d7","2c0798e0b0a4401b9ad1853c968df38e","d78d1f0598de44079bfc8ede01e8310b","3f08efcc4c674805a10d31d02c181087","99c0b99a1bfb4feba6a2a836b95f4b43","15236280ace3460da029570ef328b0f3","3b4ca46541a048e0892b9504c434ac33","df24eeafe9c94bcf9f2581cc71914d62","14f2fd328a644486bb45300130e38a68","355e9dc84d9a49a2b4dc7dbc3c11c1e7","2bab2235206b4fffb2d1fac131166d5a","c396b2d431b04ba08ff1b7f02f33f861","57563ee47e0840d0ae68020a71952849","35feb8eda7574e05a262642c22e790c0","8787a4558fc94b1ea28facb926609988","f5bd6636604a457b8773381e206e4aeb","e97ea5f01061459f81c8438a6b865a1f","bb3dec7ac05740f2adf47a8dcf9ab22d","67070f7b4ae74bfabd6b03c9b72ad90d","abd2a104241c4417b0422e
c5636d658c","530fee05b318484db49774aea46f0427","917701dd815a45d0bfd658a6ae698604","c43ae0bf26604fc783d4191c77b760cd","cb103c5e92eb4bf796a8a5221cfac47b","6d82dd318bce403c9e720adb41a50cba","f9b26b7429ee49699813253b4b4ac1dc","1f6be59d05a8490d8e12266692147a71","f6cbe5e45d2d4d0c90c169c94e9817cf","f9bf34de1e7946659c7fd71a14c34fa6","cac7e05fb2c34c58bcf347d2184e757a","21494b02a50f439bae51b8cbc5bb4328","c197d319ff984ff3a101504fc827ab19","a537db1dc3de47418a1f1951cfc32174","2aeab274acd2427fb7124b0ca46198ca","bea45ab851dd406cb15bd92e322fba4b","2022df3a4b36495fa2ed7ca10e25ffa9","ed3887c759c34fbba2305f680bb81458","1ca4d8518b884920ac4ae9bd4e9cbd64","003acc689d884bb78436307a87a58400","0548282c435a41398e84d20f71edb39a","123fe38481324a20a899ca44c8748258","e9456cac3dee45329f42d6ebbfb08788","0ca714858a0b40ec978bfbbef638b5d0","b60c5eae9e584c5d9f6ec003d688865c","68853a910341481bb354b571820e29fc"]},"id":"G-8s3Lb37nCY","executionInfo":{"status":"ok","timestamp":1762076959812,"user_tz":-60,"elapsed":11387,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"dae60ef5-8c88-48a1-981f-762212c2e5e5"},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:00 Dict[str, Any]:\n"," \"\"\"Query the RAG system with advanced configuration.\"\"\"\n","\n"," # Check if index exists\n"," if self.index is None:\n"," return {\"response\": \"โŒ Please initialize the database first!\", \"sources\": [], \"config\": {}}\n","\n"," # Check if question is empty\n"," if not question or not question.strip():\n"," return {\"response\": \"โš ๏ธ Please enter a question first!\", \"sources\": [], \"config\": {}}\n","\n"," try:\n"," # Update settings with new parameters\n"," self.update_settings(model, temperature, chunk_size, chunk_overlap)\n","\n"," # Get postprocessors\n"," postprocessors = []\n"," for name in postprocessor_names:\n"," processor = self.get_postprocessor(name, similarity_cutoff)\n"," if processor is not None:\n"," 
postprocessors.append(processor)\n","\n"," # Get synthesizer\n"," synthesizer = self.get_synthesizer(synthesizer_name)\n","\n"," # Create query engine with all parameters\n"," query_engine_kwargs = {\"similarity_top_k\": similarity_top_k}\n"," if postprocessors:\n"," query_engine_kwargs[\"node_postprocessors\"] = postprocessors\n"," if synthesizer is not None:\n"," query_engine_kwargs[\"response_synthesizer\"] = synthesizer\n","\n"," query_engine = self.index.as_query_engine(**query_engine_kwargs)\n","\n"," # Query and get response\n"," response = query_engine.query(question)\n","\n"," # Extract source information if available\n"," sources = []\n"," if hasattr(response, 'source_nodes'):\n"," for node in response.source_nodes:\n"," sources.append({\n"," \"text\": node.text[:200] + \"...\",\n"," \"score\": getattr(node, 'score', 0.0),\n"," \"source\": getattr(node.node, 'metadata', {}).get('file_name', 'Unknown')\n"," })\n","\n"," return {\n"," \"response\": str(response),\n"," \"sources\": sources,\n"," \"config\": {\n"," \"model\": model,\n"," \"temperature\": temperature,\n"," \"chunk_size\": chunk_size,\n"," \"chunk_overlap\": chunk_overlap,\n"," \"similarity_top_k\": similarity_top_k,\n"," \"postprocessors\": postprocessor_names,\n"," \"similarity_cutoff\": similarity_cutoff,\n"," \"synthesizer\": synthesizer_name\n"," }\n"," }\n","\n"," except Exception as e:\n"," return {\"response\": f\"โŒ Error processing query: {str(e)}\", \"sources\": [], \"config\": {}}\n","\n","# Initialize the backend\n","rag_backend = AdvancedRAGBackend()\n","print(\"๐Ÿš€ Advanced RAG Backend initialized and ready!\")\n"]},{"cell_type":"markdown","metadata":{"id":"bJSGzqnP7nCa"},"source":["## ๐ŸŽจ Part 3: Advanced Gradio Interface\n","\n","Create a sophisticated Gradio interface with all the configuration options specified:\n","1. Database initialization button\n","2. Search query input and button \n","3. Model selection dropdown\n","4. Temperature slider\n","5. 
Chunk size and overlap inputs\n","6. Similarity top-k slider\n","7. Node postprocessor multiselect\n","8. Similarity cutoff slider\n","9. Response synthesizer multiselect\n"]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"ESJIYeAO7nCa","executionInfo":{"status":"ok","timestamp":1762077739127,"user_tz":-60,"elapsed":525,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"4973da73-3a98-4ea9-bec1-ca66b73047d3"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Advanced RAG interface created successfully!\n"]}],"source":["def create_advanced_rag_interface():\n"," \"\"\"Create advanced RAG interface with full configuration options.\"\"\"\n","\n"," def initialize_db():\n"," \"\"\"Handle database initialization.\"\"\"\n"," return rag_backend.initialize_database()\n","\n"," def handle_advanced_query(question, model, temperature, chunk_size, chunk_overlap,\n"," similarity_top_k, postprocessors, similarity_cutoff, synthesizer):\n"," \"\"\"Handle advanced RAG queries with all configuration options.\"\"\"\n"," result = rag_backend.advanced_query(\n"," question, model, temperature, chunk_size, chunk_overlap,\n"," similarity_top_k, postprocessors, similarity_cutoff, synthesizer\n"," )\n","\n"," # Format configuration for display\n"," config_text = f\"\"\"**Current Configuration:**\n","- Model: {result['config'].get('model', 'N/A')}\n","- Temperature: {result['config'].get('temperature', 'N/A')}\n","- Chunk Size: {result['config'].get('chunk_size', 'N/A')}\n","- Chunk Overlap: {result['config'].get('chunk_overlap', 'N/A')}\n","- Similarity Top-K: {result['config'].get('similarity_top_k', 'N/A')}\n","- Postprocessors: {', '.join(result['config'].get('postprocessors', []))}\n","- Similarity Cutoff: {result['config'].get('similarity_cutoff', 'N/A')}\n","- Synthesizer: {result['config'].get('synthesizer', 'N/A')}\"\"\"\n","\n"," return result[\"response\"], config_text\n","\n"," 
# TODO: Create the advanced interface structure\n"," # Hint: This interface needs more complex layout with configuration controls\n","\n"," with gr.Blocks(title=\"Advanced RAG Assistant\") as interface:\n"," # TODO: Add title and description\n"," # Hint: Use gr.Markdown() for formatted text\n","\n"," # Your title and description here:\n"," gr.Markdown(\"# ๐Ÿš€ Advanced RAG Assistant\")\n"," gr.Markdown(\"Configure and query your Retrieval-Augmented Generation system with full control over all parameters.\")\n","\n"," # Database initialization section\n"," gr.Markdown(\"### ๐Ÿ“Š Database Setup\")\n","\n","\n"," # TODO: Add database initialization section\n"," # Hint: Use gr.Button() for initialization and gr.Textbox() for status\n"," init_btn = gr.Button(\"Initialize Database\", variant=\"secondary\")\n"," status_output = gr.Textbox(label=\"Initialization Status\", interactive=False, lines=2)\n","\n","\n"," # TODO: Create main layout with columns\n"," # Hint: Configuration controls on left, query/response on right makes sense\n"," # Use gr.Row() and gr.Column() to organize this\n","\n"," with gr.Row():\n"," with gr.Column(scale=1):\n","\n"," gr.Markdown(\"### โš™๏ธ RAG Configuration\")\n","\n"," # TODO: Model selection\n"," # Hint: Use gr.Dropdown() with choices=[\"gpt-4o\", \"gpt-4o-mini\"]\n"," model_dropdown = gr.Dropdown(\n"," choices=[\"gpt-4o\", \"gpt-4o-mini\"],\n"," value=\"gpt-4o-mini\",\n"," label=\"Model\"\n"," )\n","\n","\n"," # TODO: Temperature control\n"," # Hint: Use gr.Slider() with minimum=0.0, maximum=1.0, step=0.1, value=0.1\n"," temperature_slider = gr.Slider(\n"," minimum=0.0,\n"," maximum=1.0,\n"," step=0.1,\n"," value=0.1,\n"," label=\"Temperature\"\n"," )\n","\n","\n"," # TODO: Chunking parameters\n"," # Hint: Use gr.Number() for numeric inputs with default values\n"," # chunk_size_input = ? (default 512)\n"," chunk_size_input = gr.Number(\n"," value=512,\n"," label=\"Chunk Size\",\n"," precision=0\n"," )\n","\n"," # chunk_overlap_input = ? 
(default 50)\n"," chunk_overlap_input = gr.Number(\n"," value=50,\n"," label=\"Chunk Overlap\",\n"," precision=0\n"," )\n","\n","\n"," # TODO: Retrieval parameters\n"," # Hint: Use gr.Slider() with minimum=1, maximum=20, step=1, value=5\n"," # similarity_topk_slider = ?\n"," similarity_topk_slider = gr.Slider(\n"," minimum=1,\n"," maximum=20,\n"," step=1,\n"," value=5,\n"," label=\"Similarity Top-K\"\n"," )\n","\n","\n"," # TODO: Postprocessor selection\n"," # Hint: Use gr.CheckboxGroup() with choices=[\"SimilarityPostprocessor\"]\n"," # postprocessor_checkbox = ?\n"," postprocessor_checkbox = gr.CheckboxGroup(\n"," choices=[\"SimilarityPostprocessor\"],\n"," label=\"Postprocessors\"\n"," )\n","\n","\n"," # TODO: Similarity filtering\n"," # Hint: Use gr.Slider() with minimum=0.0, maximum=1.0, step=0.1, value=0.3\n"," # similarity_cutoff_slider = ?\n"," similarity_cutoff_slider = gr.Slider(\n"," minimum=0.0,\n"," maximum=1.0,\n"," step=0.1,\n"," value=0.3,\n"," label=\"Similarity Cutoff\"\n"," )\n","\n","\n"," # TODO: Response synthesizer\n"," # Hint: Use gr.Dropdown() with choices=[\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"]\n"," # synthesizer_dropdown = ?\n"," synthesizer_dropdown = gr.Dropdown(\n"," choices=[\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"],\n"," value=\"TreeSummarize\",\n"," label=\"Response Synthesizer\"\n"," )\n","\n","\n"," with gr.Column(scale=2):\n"," gr.Markdown(\"### ๐Ÿ’ฌ Query Interface\")\n","\n"," # TODO: Query input\n"," # Hint: Use gr.Textbox() with label=\"Ask a question\", placeholder text, lines=3\n"," # query_input = ?\n"," query_input = gr.Textbox(\n"," label=\"Ask a question\",\n"," placeholder=\"Enter your question about the documents...\",\n"," lines=3\n"," )\n","\n","\n"," # TODO: Submit button\n"," # Hint: Use gr.Button() with variant=\"primary\"\n"," # submit_btn = ?\n"," submit_btn = gr.Button(\"Submit Query\", variant=\"primary\")\n","\n","\n"," # TODO: Response output\n"," # Hint: 
Use gr.Textbox() with lines=12, interactive=False\n"," # response_output = ?\n"," response_output = gr.Textbox(\n"," label=\"Response\",\n"," lines=12,\n"," interactive=False\n"," )\n","\n","\n"," # TODO: Configuration display\n"," # Hint: Use gr.Textbox() with lines=8, interactive=False\n"," # config_display = ?\n"," config_display = gr.Textbox(\n"," label=\"Active Configuration\",\n"," lines=8,\n"," interactive=False\n"," )\n","\n","\n"," # Uncomment to Connect functions to components\n"," init_btn.click(initialize_db, outputs=[status_output])\n","\n"," submit_btn.click(\n"," handle_advanced_query,\n"," inputs=[\n"," query_input, model_dropdown, temperature_slider,\n"," chunk_size_input, chunk_overlap_input, similarity_topk_slider,\n"," postprocessor_checkbox, similarity_cutoff_slider, synthesizer_dropdown\n"," ],\n"," outputs=[response_output, config_display]\n"," )\n","\n","\n"," return interface\n","\n","# Create the interface\n","advanced_interface = create_advanced_rag_interface()\n","print(\"โœ… Advanced RAG interface created successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"eBm-9bTg7nCe"},"source":["## ๐Ÿš€ Part 4: Launch Your Advanced Application\n","\n","Launch your advanced Gradio application and test all the configuration options!\n"]},{"cell_type":"code","execution_count":6,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"Od1XKgln7nCe","executionInfo":{"status":"ok","timestamp":1762077749864,"user_tz":-60,"elapsed":1862,"user":{"displayName":"Chandra Sekhar","userId":"10081177651521172224"}},"outputId":"7875e27f-1faa-4137-f143-63350b44c54d"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐ŸŽ‰ Launching your Advanced RAG Assistant...\n","๐Ÿ”— Your application will open in a new browser tab!\n","\n","โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\n","\n","๐Ÿ“‹ Testing Instructions:\n","1. Click 'Initialize Vector Database' button first\n","2. Wait for success message\n","3. 
Configure your RAG parameters:\n"," - Choose model (gpt-4o, gpt-4o-mini)\n"," - Adjust temperature (0.0 = deterministic, 1.0 = creative)\n"," - Set chunk size and overlap\n"," - Choose similarity top-k\n"," - Select postprocessors and synthesizer\n","4. Enter a question and click 'Ask Question'\n","5. Review both the response and configuration used\n","\n","๐Ÿงช Experiments to try:\n","- Compare different models with the same question\n","- Test temperature effects (0.1 vs 0.9)\n","- Try different chunk sizes (256 vs 1024)\n","- Compare synthesizers (TreeSummarize vs Refine)\n","- Adjust similarity cutoff to filter results\n","It looks like you are running Gradio on a hosted Jupyter notebook, which requires `share=True`. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n","\n","Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n","* Running on public URL: https://a1ccb1960daeb1b98b.gradio.live\n","\n","This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"]},{"output_type":"display_data","data":{"text/plain":[""],"text/html":["
"]},"metadata":{}},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":6}],"source":["print(\"๐ŸŽ‰ Launching your Advanced RAG Assistant...\")\n","print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n","print(\"\")\n","print(\"โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\")\n","print(\"\")\n","print(\"๐Ÿ“‹ Testing Instructions:\")\n","print(\"1. Click 'Initialize Vector Database' button first\")\n","print(\"2. Wait for success message\")\n","print(\"3. Configure your RAG parameters:\")\n","print(\" - Choose model (gpt-4o, gpt-4o-mini)\")\n","print(\" - Adjust temperature (0.0 = deterministic, 1.0 = creative)\")\n","print(\" - Set chunk size and overlap\")\n","print(\" - Choose similarity top-k\")\n","print(\" - Select postprocessors and synthesizer\")\n","print(\"4. Enter a question and click 'Ask Question'\")\n","print(\"5. Review both the response and configuration used\")\n","print(\"\")\n","print(\"๐Ÿงช Experiments to try:\")\n","print(\"- Compare different models with the same question\")\n","print(\"- Test temperature effects (0.1 vs 0.9)\")\n","print(\"- Try different chunk sizes (256 vs 1024)\")\n","print(\"- Compare synthesizers (TreeSummarize vs Refine)\")\n","print(\"- Adjust similarity cutoff to filter results\")\n","\n","# Your code here:\n","advanced_interface.launch()"]},{"cell_type":"markdown","metadata":{"id":"S4_y-7f87nCf"},"source":["## ๐Ÿ’ก Understanding the Configuration Options\n","\n","### Model Selection\n","- **gpt-4o**: Latest and most capable model, best quality responses\n","- **gpt-4o-mini**: Faster and cheaper while maintaining good quality\n","\n","### Temperature (0.0 - 1.0)\n","- **0.0-0.3**: Deterministic, factual responses\n","- **0.4-0.7**: Balanced creativity and accuracy\n","- **0.8-1.0**: More creative and varied responses\n","\n","### Chunk Size & Overlap\n","- **Chunk Size**: How much text to process at once (256-1024 typical)\n","- **Chunk Overlap**: 
Overlap between chunks to maintain context (10-100 typical)\n","\n","### Similarity Top-K (1-20)\n","- **Lower values (3-5)**: More focused, faster responses\n","- **Higher values (8-15)**: More comprehensive, detailed responses\n","\n","### Node Postprocessors\n","- **SimilarityPostprocessor**: Filters out low-relevance documents\n","\n","### Similarity Cutoff (0.0-1.0)\n","- **0.1-0.3**: More permissive, includes potentially relevant docs\n","- **0.5-0.8**: More strict, only highly relevant docs\n","\n","### Response Synthesizers\n","- **TreeSummarize**: Hierarchical summarization, good for complex topics\n","- **Refine**: Iterative refinement, builds detailed responses\n","- **CompactAndRefine**: Efficient version of Refine\n","- **Default**: Standard synthesis approach\n"]},{"cell_type":"markdown","metadata":{"id":"ynbCd3Ay7nCg"},"source":["## โœ… Assignment Completion Checklist\n","\n","Before submitting, ensure you have:\n","\n","- [ ] Set up your OPENROUTER_API_KEY environment variable\n","- [ ] Imported all necessary libraries including advanced RAG components\n","- [ ] Created AdvancedRAGBackend class with configurable parameters\n","- [ ] Implemented all required methods:\n"," - [ ] `update_settings()` - Updates LLM and chunking parameters\n"," - [ ] `initialize_database()` - Sets up vector database\n"," - [ ] `get_postprocessor()` - Returns selected postprocessor\n"," - [ ] `get_synthesizer()` - Returns selected synthesizer\n"," - [ ] `advanced_query()` - Handles queries with all configuration options\n","- [ ] Created advanced Gradio interface with all required components:\n"," - [ ] Initialize database button\n"," - [ ] Model selection dropdown (gpt-4o, gpt-4o-mini)\n"," - [ ] Temperature slider (0 to 1, step 0.1)\n"," - [ ] Chunk size input (default 512)\n"," - [ ] Chunk overlap input (default 50)\n"," - [ ] Similarity top-k slider (1 to 20, default 5)\n"," - [ ] Node postprocessor multiselect\n"," - [ ] Similarity cutoff slider (0.0 to 1.0, step 0.1, 
default 0.3)\n"," - [ ] Response synthesizer dropdown\n"," - [ ] Query input and submit button\n"," - [ ] Response output\n"," - [ ] Configuration display\n","- [ ] Connected all components to backend functions\n","- [ ] Successfully launched the application\n","- [ ] Tested different parameter combinations\n","- [ ] Verified all configuration options work correctly\n","\n","## ๐ŸŽŠ Congratulations!\n","\n","You've successfully built a professional, production-ready RAG application! You now have:\n","\n","- **Advanced Parameter Control**: Full control over all RAG system parameters\n","- **Professional UI**: Clean, organized interface with proper layout\n","- **Real-time Configuration**: Ability to experiment with different settings\n","- **Production Patterns**: Understanding of how to build scalable AI applications\n","\n","## ๐Ÿš€ Next Steps & Extensions\n","\n","**Potential Enhancements:**\n","1. **Authentication**: Add user login and session management\n","2. **Document Upload**: Allow users to upload their own documents\n","3. **Chat History**: Implement conversation memory\n","4. **Performance Monitoring**: Add response time and quality metrics\n","5. **A/B Testing**: Compare different configurations side-by-side\n","6. **Export Features**: Download responses and configurations\n","7. 
**Advanced Visualizations**: Show document similarity scores and retrieval paths\n","\n","**Deployment Options:**\n","- **Local**: Run on your machine for development\n","- **Gradio Cloud**: Deploy with `interface.launch(share=True)`\n","- **Hugging Face Spaces**: Deploy to Hugging Face for public access\n","- **Docker**: Containerize for scalable deployment\n","- **Cloud Platforms**: Deploy to AWS, GCP, or Azure\n","\n","You're now ready to build sophisticated AI-powered applications!\n"]}],"metadata":{"kernelspec":{"display_name":"accelerator","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"colab":{"provenance":[]},"widgets":{"application/vnd.jupyter.widget-state+json":{"8604264dc91244e5b220bd7c4eafde65":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_4f05b7fa10294e418ab83a4a9682de97","IPY_MODEL_9efbbe79459f4cb380a9385d04602190","IPY_MODEL_5306981fa6014905af10c1ca496ae79e"],"layout":"IPY_MODEL_4bd337fcf32e40a995de3d700d7076e2"}},"4f05b7fa10294e418ab83a4a9682de97":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_478e9999f2d6403b90dd357d63892394","placeholder":"โ€‹","style":"IPY_MODEL
_7d09c392935f4ae68cff11825658d956","value":"modules.json:โ€‡100%"}},"9efbbe79459f4cb380a9385d04602190":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_77212086fccc45a08d90a4d3019f81f8","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_cad089b3d1c14245a5f5f27c3f883995","value":349}},"5306981fa6014905af10c1ca496ae79e":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_157de0c0e4d84630b1f68a541670406a","placeholder":"โ€‹","style":"IPY_MODEL_7d4749a8eef64690b258a1ec2f4f0a5d","value":"โ€‡349/349โ€‡[00:00<00:00,โ€‡23.2kB/s]"}},"4bd337fcf32e40a995de3d700d7076e2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template
_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"478e9999f2d6403b90dd357d63892394":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7d09c392935f4ae68cff11825658d956":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"77212086fccc45a08d90a4d3019f81f8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.
2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cad089b3d1c14245a5f5f27c3f883995":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"157de0c0e4d84630b1f68a541670406a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify
_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7d4749a8eef64690b258a1ec2f4f0a5d":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6975ef41f4fb4048a9e7750e58928842":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_5c8793d95ca74111bab066d33025a6cf","IPY_MODEL_3b2835809bec43c581c4f698faaf1684","IPY_MODEL_a8ada46429304fb3a7a967166d81a39d"],"layout":"IPY_MODEL_8f904038be004972b57dd19267996517"}},"5c8793d95ca74111bab066d33025a6cf":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_7fbb2bfcf123433eafd300f189a377e3","placeholder":"โ€‹","style":"IPY_MODEL_c61a9ba7cd684f2a8e440b0ba0489423","value":"config_sentence_transformers.json:โ€‡100%"}},"3b2835809bec43c581c4f698faaf1684":{"model_module":"@jupyter-widgets/controls","model_name":"Float
ProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_59fc7367fa14442685cd55259353af14","max":124,"min":0,"orientation":"horizontal","style":"IPY_MODEL_15ac214dfeab4c108859d9a866f28ec3","value":124}},"a8ada46429304fb3a7a967166d81a39d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a53da8f930bd4a9389e5784ca936f327","placeholder":"โ€‹","style":"IPY_MODEL_b2c16eda91864c7b9f4829c5de52306e","value":"โ€‡124/124โ€‡[00:00<00:00,โ€‡9.27kB/s]"}},"8f904038be004972b57dd19267996517":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,
"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7fbb2bfcf123433eafd300f189a377e3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c61a9ba7cd684f2a8e440b0ba0489423":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"59fc7367fa14442685cd55259353af14":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":nu
ll,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"15ac214dfeab4c108859d9a866f28ec3":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a53da8f930bd4a9389e5784ca936f327":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"ov
erflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b2c16eda91864c7b9f4829c5de52306e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e7c27024bbe94d7789644b3d5a590416":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_ff97ae78d78f49789d6d0cd0a83b4262","IPY_MODEL_fa92ad4e8ff5422791620e320962a3b9","IPY_MODEL_28b3e31f74bf41a796ae0a5e182ee796"],"layout":"IPY_MODEL_97229b2177a640ed926dfc41e079555d"}},"ff97ae78d78f49789d6d0cd0a83b4262":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f3d7b39af9d140bbbca7ba03b5e388a9","placeholder":"โ€‹","style":"IPY_MODEL_c67db995ba5645baa7badc7278a0fa79","value":"README.md:โ€‡"}},"fa92ad4e8ff5422791620e320962a3b9":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_cou
nt":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_a4fad66eb18646198119d009f11dc1b0","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_9a34463da8644b0b8c60ca7f6cdf084c","value":1}},"28b3e31f74bf41a796ae0a5e182ee796":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_69f5656f8acf415c89fbf35dd5687ec6","placeholder":"โ€‹","style":"IPY_MODEL_c4338b3bffd445b5b4741dffbe35b8d8","value":"โ€‡94.8k/?โ€‡[00:00<00:00,โ€‡5.36MB/s]"}},"97229b2177a640ed926dfc41e079555d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f3d7b39af9d140bbbca
7ba03b5e388a9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c67db995ba5645baa7badc7278a0fa79":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a4fad66eb18646198119d009f11dc1b0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,
"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"9a34463da8644b0b8c60ca7f6cdf084c":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"69f5656f8acf415c89fbf35dd5687ec6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c4338b3bffd445b5b4741dffbe35b8d8":{"model_module":"@jupyter-widgets/contro
ls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2aeb1bc3e00b4f4eb807e73ab7673780":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_e53050cee4ca470ab17369df159381f6","IPY_MODEL_0309865d6fa1482f9a9040a4c88d230d","IPY_MODEL_245117ffacca475782b2d83d12dcb1de"],"layout":"IPY_MODEL_55adf966a1d2425faa0798df1e4b64ed"}},"e53050cee4ca470ab17369df159381f6":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_938deff65288463c9c4d5661c16cd49d","placeholder":"โ€‹","style":"IPY_MODEL_bc048cedbc0c400abc416dff11670bb1","value":"sentence_bert_config.json:โ€‡100%"}},"0309865d6fa1482f9a9040a4c88d230d":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null
,"layout":"IPY_MODEL_f632c393f14a451288e3b268f165204c","max":52,"min":0,"orientation":"horizontal","style":"IPY_MODEL_df81392c77974cb9b662d95bf2466bea","value":52}},"245117ffacca475782b2d83d12dcb1de":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0b95a5b41d3e4002ab447186f74347c9","placeholder":"โ€‹","style":"IPY_MODEL_a671a07a004a4ab185a8a5d00e4d4fa7","value":"โ€‡52.0/52.0โ€‡[00:00<00:00,โ€‡4.23kB/s]"}},"55adf966a1d2425faa0798df1e4b64ed":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"938deff65288463c9c4d5661c16cd49d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_mo
dule_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bc048cedbc0c400abc416dff11670bb1":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f632c393f14a451288e3b268f165204c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"j
ustify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"df81392c77974cb9b662d95bf2466bea":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"0b95a5b41d3e4002ab447186f74347c9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a671a07a004a4ab185a8a5d00e4d4fa7":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"De
scriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"9f6c9540c9054609aaa0552d02f617de":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_6c68b0f35cc94529aa85594f01e6661a","IPY_MODEL_6292d1404e464ba094b8d8f2ea0b568e","IPY_MODEL_3bad25e1c4664f7bb8ae6c2395e9971a"],"layout":"IPY_MODEL_adc132e026054fe9bdc7f7aedef2ea63"}},"6c68b0f35cc94529aa85594f01e6661a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5a6d101f035e4a609b9851ce5e5429af","placeholder":"โ€‹","style":"IPY_MODEL_d9d256b2944848aa8b79ea6c6b2f4119","value":"config.json:โ€‡100%"}},"6292d1404e464ba094b8d8f2ea0b568e":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_5c61221cc07e4aca819c6ac7f3ed66d1","max":743,"min":0,"orientation":"horizontal","style":"IPY_MODEL_a5e2f42365e048319118168576a3ce21","value":743}},"3bad25e1c4664f7bb8ae
6c2395e9971a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3096e2e856f44dbb87d3ea29a8ba683c","placeholder":"โ€‹","style":"IPY_MODEL_782fdf488dc14f24864990c98844d1b2","value":"โ€‡743/743โ€‡[00:00<00:00,โ€‡39.8kB/s]"}},"adc132e026054fe9bdc7f7aedef2ea63":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5a6d101f035e4a609b9851ce5e5429af":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"a
lign_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d9d256b2944848aa8b79ea6c6b2f4119":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5c61221cc07e4aca819c6ac7f3ed66d1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"ov
erflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a5e2f42365e048319118168576a3ce21":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"3096e2e856f44dbb87d3ea29a8ba683c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"782fdf488dc14f24864990c98844d1b2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"38c9b3a98c354c8aa5efa7886b3
67cbf":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_b50d144ee700417cbcbccf38344243d4","IPY_MODEL_accf0eb5d2334426b82695982c50c668","IPY_MODEL_4956a5b3cd41447db2f056f1df27c464"],"layout":"IPY_MODEL_e2108e00def44b118c2340b2d8e04158"}},"b50d144ee700417cbcbccf38344243d4":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_76247a77e1cd4890a6eb7404680a2076","placeholder":"โ€‹","style":"IPY_MODEL_d6ae552768e242c9bf216b45c787fb46","value":"model.safetensors:โ€‡100%"}},"accf0eb5d2334426b82695982c50c668":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_c44bc8795a31454ca287c7c2aa32b421","max":133466304,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c4317b5ae1a04ec3ba87783c44500605","value":133466304}},"4956a5b3cd41447db2f056f1df27c464":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-wid
gets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b971d8a392f54127909469183eddd2e8","placeholder":"โ€‹","style":"IPY_MODEL_700bf7d497ab478a90c5f05a80bf41a8","value":"โ€‡133M/133Mโ€‡[00:01<00:00,โ€‡108MB/s]"}},"e2108e00def44b118c2340b2d8e04158":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"76247a77e1cd4890a6eb7404680a2076":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":nul
l,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d6ae552768e242c9bf216b45c787fb46":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c44bc8795a31454ca287c7c2aa32b421":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c4317b5ae1a04ec3ba87783c44500605":{"model_module":"@jupyter-wid
gets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"b971d8a392f54127909469183eddd2e8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"700bf7d497ab478a90c5f05a80bf41a8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6967529dacc94d93bd61758210fd272e":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/c
ontrols","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_9748c5335e714b4cae231a8e282eaec5","IPY_MODEL_ea8ae967e70940678afde0d3ab0dee1f","IPY_MODEL_c138a6fa38af473b82b0a92c3306d713"],"layout":"IPY_MODEL_018d9c061ad1460aaf0142f7c02f2db1"}},"9748c5335e714b4cae231a8e282eaec5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e0e277e418d44f09b849f9e73f4987ae","placeholder":"โ€‹","style":"IPY_MODEL_7637569217f844d48757fea4d5f68dde","value":"tokenizer_config.json:โ€‡100%"}},"ea8ae967e70940678afde0d3ab0dee1f":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_a6b94e17d58642989fc52e557c4d524b","max":366,"min":0,"orientation":"horizontal","style":"IPY_MODEL_02c4ffb48da641afb95c555a090d8738","value":366}},"c138a6fa38af473b82b0a92c3306d713":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name
":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_42279ff6bd8c468d9923590245f12355","placeholder":"โ€‹","style":"IPY_MODEL_b8f5b8519e0948d79c30fea7d73a36d7","value":"โ€‡366/366โ€‡[00:00<00:00,โ€‡32.2kB/s]"}},"018d9c061ad1460aaf0142f7c02f2db1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e0e277e418d44f09b849f9e73f4987ae":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justif
y_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7637569217f844d48757fea4d5f68dde":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a6b94e17d58642989fc52e557c4d524b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"02c4ffb48da641afb95c555a090d8738":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name"
:"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"42279ff6bd8c468d9923590245f12355":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b8f5b8519e0948d79c30fea7d73a36d7":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2c0798e0b0a4401b9ad1853c968df38e":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBox
View","box_style":"","children":["IPY_MODEL_d78d1f0598de44079bfc8ede01e8310b","IPY_MODEL_3f08efcc4c674805a10d31d02c181087","IPY_MODEL_99c0b99a1bfb4feba6a2a836b95f4b43"],"layout":"IPY_MODEL_15236280ace3460da029570ef328b0f3"}},"d78d1f0598de44079bfc8ede01e8310b":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3b4ca46541a048e0892b9504c434ac33","placeholder":"โ€‹","style":"IPY_MODEL_df24eeafe9c94bcf9f2581cc71914d62","value":"vocab.txt:โ€‡"}},"3f08efcc4c674805a10d31d02c181087":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_14f2fd328a644486bb45300130e38a68","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_355e9dc84d9a49a2b4dc7dbc3c11c1e7","value":1}},"99c0b99a1bfb4feba6a2a836b95f4b43":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2bab2235206b4fffb2d1fac131166d5a","placeholder":"โ€‹","style":"IPY_MODEL_c396b2d431b04ba08ff1b7f02f33f861","value":"โ€‡23
2k/?โ€‡[00:00<00:00,โ€‡2.59MB/s]"}},"15236280ace3460da029570ef328b0f3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3b4ca46541a048e0892b9504c434ac33":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":n
ull,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"df24eeafe9c94bcf9f2581cc71914d62":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"14f2fd328a644486bb45300130e38a68":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"355e9dc84d9a49a2b4dc7dbc3c11c1e7":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2bab2235206b4fffb2
d1fac131166d5a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c396b2d431b04ba08ff1b7f02f33f861":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"57563ee47e0840d0ae68020a71952849":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_35feb8eda7574e05a262642c22e790c0","IPY_MODEL_8787a4558fc94b1ea28facb926609988","IPY_MODEL_f5bd6636604a457b8773381e206e4aeb"],"layout":"IPY_MODEL_e97ea5f0
1061459f81c8438a6b865a1f"}},"35feb8eda7574e05a262642c22e790c0":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bb3dec7ac05740f2adf47a8dcf9ab22d","placeholder":"โ€‹","style":"IPY_MODEL_67070f7b4ae74bfabd6b03c9b72ad90d","value":"tokenizer.json:โ€‡"}},"8787a4558fc94b1ea28facb926609988":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_abd2a104241c4417b0422ec5636d658c","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_530fee05b318484db49774aea46f0427","value":1}},"f5bd6636604a457b8773381e206e4aeb":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_917701dd815a45d0bfd658a6ae698604","placeholder":"โ€‹","style":"IPY_MODEL_c43ae0bf26604fc783d4191c77b760cd","value":"โ€‡711k/?โ€‡[00:00<00:00,โ€‡17.2MB/s]"}},"e97ea5f01061459f81c8438a6b865a1f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module
":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bb3dec7ac05740f2adf47a8dcf9ab22d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"67070f7b4ae74bfabd6b03c9b72ad90d":{"model_module":"@jupyter-widgets/controls","
model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"abd2a104241c4417b0422ec5636d658c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"530fee05b318484db49774aea46f0427":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"917701dd815a45d0bfd658a6ae698604":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.
0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c43ae0bf26604fc783d4191c77b760cd":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"cb103c5e92eb4bf796a8a5221cfac47b":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_6d82dd318bce403c9e720adb41a50cba","IPY_MODEL_f9b26b7429ee49699813253b4b4ac1dc","IPY_MODEL_1f6be59d05a8490d8e12266692147a71"],"layout":"IPY_MODEL_f6cbe5e45d2d4d0c90c169c94e9817cf"}},"6d82dd318bce403c9e720adb41a50cba":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_
model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f9bf34de1e7946659c7fd71a14c34fa6","placeholder":"โ€‹","style":"IPY_MODEL_cac7e05fb2c34c58bcf347d2184e757a","value":"special_tokens_map.json:โ€‡100%"}},"f9b26b7429ee49699813253b4b4ac1dc":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_21494b02a50f439bae51b8cbc5bb4328","max":125,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c197d319ff984ff3a101504fc827ab19","value":125}},"1f6be59d05a8490d8e12266692147a71":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a537db1dc3de47418a1f1951cfc32174","placeholder":"โ€‹","style":"IPY_MODEL_2aeab274acd2427fb7124b0ca46198ca","value":"โ€‡125/125โ€‡[00:00<00:00,โ€‡9.95kB/s]"}},"f6cbe5e45d2d4d0c90c169c94e9817cf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0"
,"_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f9bf34de1e7946659c7fd71a14c34fa6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cac7e05fb2c34c58bcf347d2184e757a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"Descrip
tionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"21494b02a50f439bae51b8cbc5bb4328":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c197d319ff984ff3a101504fc827ab19":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a537db1dc3de47418a1f1951cfc32174":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_it
ems":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2aeab274acd2427fb7124b0ca46198ca":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bea45ab851dd406cb15bd92e322fba4b":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_2022df3a4b36495fa2ed7ca10e25ffa9","IPY_MODEL_ed3887c759c34fbba2305f680bb81458","IPY_MODEL_1ca4d8518b884920ac4ae9bd4e9cbd64"],"layout":"IPY_MODEL_003acc689d884bb78436307a87a58400"}},"2022df3a4b36495fa2ed7ca10e25ffa9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_
version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0548282c435a41398e84d20f71edb39a","placeholder":"โ€‹","style":"IPY_MODEL_123fe38481324a20a899ca44c8748258","value":"config.json:โ€‡100%"}},"ed3887c759c34fbba2305f680bb81458":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_e9456cac3dee45329f42d6ebbfb08788","max":190,"min":0,"orientation":"horizontal","style":"IPY_MODEL_0ca714858a0b40ec978bfbbef638b5d0","value":190}},"1ca4d8518b884920ac4ae9bd4e9cbd64":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b60c5eae9e584c5d9f6ec003d688865c","placeholder":"โ€‹","style":"IPY_MODEL_68853a910341481bb354b571820e29fc","value":"โ€‡190/190โ€‡[00:00<00:00,โ€‡16.1kB/s]"}},"003acc689d884bb78436307a87a58400":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_col
umns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0548282c435a41398e84d20f71edb39a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"123fe38481324a20a899ca44c8748258":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e9456cac3dee45329f42d6ebbfb08788
":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0ca714858a0b40ec978bfbbef638b5d0":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"b60c5eae9e584c5d9f6ec003d688865c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"g
rid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"68853a910341481bb354b571820e29fc":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Chandra_Sekhar_Yandra/README.md b/Chandra_Sekhar_Yandra/README.md deleted file mode 100644 index dd9649e..0000000 --- a/Chandra_Sekhar_Yandra/README.md +++ /dev/null @@ -1 +0,0 @@ -# Chandra_Sekhar_Yandra diff --git a/Chandrap/README.md b/Chandrap/README.md deleted file mode 100644 index e92c959..0000000 --- a/Chandrap/README.md +++ /dev/null @@ -1 +0,0 @@ -# Chandrap diff --git a/Debashis_Nayak/README.md b/Debashis_Nayak/README.md deleted file mode 100644 index 62cc05a..0000000 --- a/Debashis_Nayak/README.md +++ /dev/null @@ -1 +0,0 @@ -# Debashis_Nayak diff --git a/Deepak_SIngh/README.md b/Deepak_SIngh/README.md deleted file mode 100644 index 54d4ff8..0000000 --- a/Deepak_SIngh/README.md +++ /dev/null @@ -1 +0,0 @@ -# Deepak_SIngh diff --git a/Dineshbabu_Sengottian/README.md b/Dineshbabu_Sengottian/README.md deleted file mode 100644 index 9c1f44d..0000000 --- a/Dineshbabu_Sengottian/README.md +++ /dev/null @@ -1 +0,0 @@ -# Dineshbabu_Sengottian diff --git a/Durga_Prasad_Chimmili/README.md b/Durga_Prasad_Chimmili/README.md deleted 
file mode 100644 index 7e593f1..0000000 --- a/Durga_Prasad_Chimmili/README.md +++ /dev/null @@ -1 +0,0 @@ -# Durga_Prasad_Chimmili diff --git a/Gaurav/README.md b/Gaurav/README.md deleted file mode 100644 index 40de3c5..0000000 --- a/Gaurav/README.md +++ /dev/null @@ -1 +0,0 @@ -# Gaurav diff --git a/Georgy_Pulivilayil_James/README.md b/Georgy_Pulivilayil_James/README.md deleted file mode 100644 index a56dd51..0000000 --- a/Georgy_Pulivilayil_James/README.md +++ /dev/null @@ -1 +0,0 @@ -# Georgy_Pulivilayil_James diff --git a/Girish_Basavaraj_Hiremath/README.md b/Girish_Basavaraj_Hiremath/README.md deleted file mode 100644 index 9833cf1..0000000 --- a/Girish_Basavaraj_Hiremath/README.md +++ /dev/null @@ -1 +0,0 @@ -# Girish_Basavaraj_Hiremath diff --git a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_2_GUIDE.md b/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_2_GUIDE.md deleted file mode 100644 index 00619f2..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_2_GUIDE.md +++ /dev/null @@ -1,357 +0,0 @@ -# ๐Ÿš€ Assignment 2: Advanced RAG Techniques - Implementation Guide - -**Status**: Ready to start! โœ… Assignment 1 Complete - -This guide provides step-by-step help for completing Assignment 2's four main functions. - ---- - -## ๐Ÿ“‹ Quick Overview - -**What You'll Build:** -1. โœ… **Similarity Postprocessor** - Filter low-relevance results -2. โœ… **TreeSummarize Engine** - Generate comprehensive responses -3. โœ… **Structured Outputs** - Create type-safe JSON responses -4. โœ… **Advanced Pipeline** - Combine all techniques - -**Estimated Time**: 60-90 minutes -**Difficulty**: Intermediate - ---- - -## โœ… Prerequisites Check - -Before starting, verify your setup: - -```python -# Run this to verify API key and index -import os -api_key = os.getenv("OPENROUTER_API_KEY") -print("โœ… API Key found!" 
if api_key else "โš ๏ธ API Key missing") - -# The assignment should have created an index already -if 'index' in globals(): - print(f"โœ… Index ready: {index is not None}") -else: - print("โŒ Run the setup cells first") -``` - ---- - -## ๐Ÿ”ง Function 1: Similarity Postprocessor - -**Goal**: Filter out low-relevance results - -**Reference Pattern** (from tutorial): -```python -similarity_processor = SimilarityPostprocessor(similarity_cutoff=0.3) -query_engine = index.as_query_engine( - similarity_top_k=top_k, - node_postprocessors=[similarity_processor] -) -``` - -**Your Implementation**: -```python -def create_query_engine_with_similarity_filter(index, similarity_cutoff: float = 0.3, top_k: int = 10): - # Create similarity postprocessor - similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff) - - # Create query engine with the postprocessor - query_engine = index.as_query_engine( - similarity_top_k=top_k, - node_postprocessors=[similarity_processor] - ) - - return query_engine -``` - -**What This Does**: -- Filters out results below the similarity threshold -- Improves precision by removing noise -- Still retrieves `top_k` initially, then filters - -**Test After Completion**: -```python -filtered_engine = create_query_engine_with_similarity_filter(index, similarity_cutoff=0.3) -response = filtered_engine.query("What are the benefits of AI agents?") -print(response) -``` - ---- - -## ๐ŸŒณ Function 2: TreeSummarize Engine - -**Goal**: Generate comprehensive analytical responses - -**Reference Pattern** (from tutorial): -```python -tree_synthesizer = TreeSummarize() -query_engine = index.as_query_engine( - response_synthesizer=tree_synthesizer, - similarity_top_k=8 -) -``` - -**Your Implementation**: -```python -def create_query_engine_with_tree_summarize(index, top_k: int = 5): - # Create TreeSummarize response synthesizer - tree_synthesizer = TreeSummarize() - - # Create query engine with the synthesizer - query_engine = 
index.as_query_engine( - similarity_top_k=top_k, - response_synthesizer=tree_synthesizer - ) - - return query_engine -``` - -**What This Does**: -- Builds responses hierarchically (groups related chunks) -- Better for complex analytical questions -- More comprehensive than simple summarization - -**Test After Completion**: -```python -tree_engine = create_query_engine_with_tree_summarize(index) -response = tree_engine.query("Compare the advantages and disadvantages of different AI agent frameworks") -print(response) -``` - ---- - -## ๐Ÿ“Š Function 3: Structured Outputs - -**Goal**: Create type-safe JSON responses using Pydantic - -**Reference Pattern** (from tutorial): -```python -output_parser = PydanticOutputParser(RecipeInfo) -program = LLMTextCompletionProgram.from_defaults( - output_parser=output_parser, - prompt_template_str=( - "Extract structured information from the following context:\n" - "{context}\n\n" - "Question: {query}\n\n" - "Extract the information and return it in the specified format." - ) -) -``` - -**Your Implementation**: -```python -def create_structured_output_program(output_model: BaseModel = ResearchPaperInfo): - # Create output parser with the Pydantic model - output_parser = PydanticOutputParser(output_model) - - # Create the structured output program - program = LLMTextCompletionProgram.from_defaults( - output_parser=output_parser, - prompt_template_str=( - "Extract structured information from the following context:\n" - "{context}\n\n" - "Question: {query}\n\n" - "Extract the information and return it in the specified format." 
- ) - ) - - return program -``` - -**What This Does**: -- Returns structured JSON matching your Pydantic model -- Type-safe with automatic validation -- Perfect for API endpoints - -**Test After Completion**: -```python -structured_program = create_structured_output_program(ResearchPaperInfo) - -# Get context first -retriever = VectorIndexRetriever(index=index, similarity_top_k=3) -nodes = retriever.retrieve("Tell me about AI agents") -context = "\n".join([node.text for node in nodes]) - -# Get structured output -response = structured_program(context=context, query="Tell me about AI agents") -print(response) -print(type(response)) # Should be ResearchPaperInfo instance -``` - ---- - -## ๐ŸŽฏ Function 4: Advanced Pipeline - -**Goal**: Combine similarity filtering + TreeSummarize for production-ready RAG - -**Reference Pattern** (from tutorial): -```python -similarity_processor = SimilarityPostprocessor(similarity_cutoff=0.3) -tree_synthesizer = TreeSummarize() -query_engine = index.as_query_engine( - similarity_top_k=10, - node_postprocessors=[similarity_processor], - response_synthesizer=tree_synthesizer -) -``` - -**Your Implementation**: -```python -def create_advanced_rag_pipeline(index, similarity_cutoff: float = 0.3, top_k: int = 10): - # Create similarity postprocessor - similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff) - - # Create TreeSummarize for comprehensive responses - tree_synthesizer = TreeSummarize() - - # Create the comprehensive query engine combining both techniques - advanced_engine = index.as_query_engine( - similarity_top_k=top_k, - node_postprocessors=[similarity_processor], - response_synthesizer=tree_synthesizer - ) - - return advanced_engine -``` - -**What This Does**: -- Filters results first (improves precision) -- Then synthesizes comprehensive response (improves quality) -- Best of both worlds! 
- -**Test After Completion**: -```python -advanced_pipeline = create_advanced_rag_pipeline(index) -response = advanced_pipeline.query("Analyze the current state and future potential of AI agent technologies") -print(response) -``` - ---- - -## ๐Ÿ” Step-by-Step Workflow - -### Step 1: Open Assignment 2 Notebook -``` -assignments/assignment_2_advanced_rag.ipynb -``` - -### Step 2: Run Setup Cells -- Run the import cell (Cell 1) โœ… -- Run the settings cell (Cell 2) โœ… -- Run the index setup cell (Cell 3) โœ… - -**Expected**: Index created successfully with documents loaded - -### Step 3: Complete Function 1 (Similarity Postprocessor) -1. Read the explanation (Cell 4) -2. Find the TODO function (Cell 5) -3. Implement using the pattern above -4. Run the test cell -5. Uncomment the test query to verify - -### Step 4: Complete Function 2 (TreeSummarize) -1. Read the explanation (Cell 6) -2. Find the TODO function (Cell 7) -3. Implement using the pattern above -4. Run the test cell -5. Uncomment the test query to verify - -### Step 5: Complete Function 3 (Structured Outputs) -1. Read the explanation (Cell 8) -2. Review the Pydantic model (Cell 9 - already defined) -3. Complete the TODO function -4. Run the test cell -5. Uncomment the retrieval and program calls - -### Step 6: Complete Function 4 (Advanced Pipeline) -1. Read the explanation (Cell 10) -2. Find the TODO function (Cell 11) -3. Combine both techniques from Functions 1 & 2 -4. Run the test cell -5. Uncomment the test query - -### Step 7: Final Test (Cell 13) -- Run the final comparison cell -- Compare basic vs advanced RAG -- All components should show โœ… - ---- - -## ๐Ÿ› Troubleshooting - -### Issue: "OPENROUTER_API_KEY not found" -**Solution**: Set environment variable (already done in your QUICK_REFERENCE.md) - -### Issue: "Index is None" -**Solution**: Run Cell 3 (setup_basic_index) first - -### Issue: "TreeSummarize takes too long" -**Solution**: Normal! 
TreeSummarize processes hierarchically (3-8 seconds) - -### Issue: "Pydantic validation errors" -**Solution**: Check that your prompt extracts all required fields (title, key_points, applications, summary) - -### Issue: "No results after similarity filter" -**Solution**: Lower similarity_cutoff (try 0.2 instead of 0.3) - -### Issue: "Import errors" -**Solution**: Verify all imports in Cell 1 ran successfully - ---- - -## โœ… Success Checklist - -- [ ] All 4 functions completed without TODO comments -- [ ] Similarity filter removes low-relevance results -- [ ] TreeSummarize generates comprehensive responses -- [ ] Structured output returns valid Pydantic model -- [ ] Advanced pipeline combines both techniques -- [ ] Final test cell shows all components โœ… -- [ ] Can compare basic vs advanced RAG responses - ---- - -## ๐Ÿ’ก Key Concepts You'll Learn - -1. **Postprocessors**: Improve retrieval precision by filtering results -2. **Response Synthesizers**: Control how retrieved info becomes answers -3. **Structured Outputs**: Enable reliable system integration -4. **Pipeline Design**: Combine techniques for production systems - ---- - -## ๐ŸŽฏ Expected Final Output - -When you complete all functions, the final test should show: - -``` -๐Ÿš€ Advanced RAG Techniques Assignment - Final Test -============================================================ - -๐Ÿ“Š Component Status: - โœ… Basic Index - โœ… Similarity Filter - โœ… TreeSummarize - โœ… Structured Output - โœ… Advanced Pipeline - -๐Ÿ†š COMPARISON: Basic vs Advanced RAG -... -๐ŸŽ‰ Congratulations! You've mastered Advanced RAG Techniques! -``` - ---- - -## ๐Ÿš€ Next Steps After Completion - -1. Experiment with different similarity_cutoff values (0.2, 0.4, 0.5) -2. Try other synthesizers: `Refine()`, `CompactAndRefine()` -3. Create custom Pydantic models for different domains -4. Move to Assignment 3a: Basic Gradio Interface - ---- - -**Ready to start? Open `assignment_2_advanced_rag.ipynb` and begin with Function 1! 
Good luck! ๐ŸŽ‰** - diff --git a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_CELL_3_COMPLETE.py b/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_CELL_3_COMPLETE.py deleted file mode 100644 index 07a1b39..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_CELL_3_COMPLETE.py +++ /dev/null @@ -1,179 +0,0 @@ -# ============================================================ -# COMPLETE CODE FOR ASSIGNMENT 3B - CELL 3 -# Copy and paste this entire code into Cell 3 of your notebook -# ============================================================ - -def create_advanced_rag_interface(): - """Create advanced RAG interface with full configuration options.""" - - def initialize_db(): - """Handle database initialization.""" - return rag_backend.initialize_database() - - def handle_advanced_query(question, model, temperature, chunk_size, chunk_overlap, - similarity_top_k, postprocessors, similarity_cutoff, synthesizer): - """Handle advanced RAG queries with all configuration options.""" - result = rag_backend.advanced_query( - question, model, temperature, chunk_size, chunk_overlap, - similarity_top_k, postprocessors, similarity_cutoff, synthesizer - ) - - # Format configuration for display - config_text = f"""**Current Configuration:** -- Model: {result['config'].get('model', 'N/A')} -- Temperature: {result['config'].get('temperature', 'N/A')} -- Chunk Size: {result['config'].get('chunk_size', 'N/A')} -- Chunk Overlap: {result['config'].get('chunk_overlap', 'N/A')} -- Similarity Top-K: {result['config'].get('similarity_top_k', 'N/A')} -- Postprocessors: {', '.join(result['config'].get('postprocessors', []))} -- Similarity Cutoff: {result['config'].get('similarity_cutoff', 'N/A')} -- Synthesizer: {result['config'].get('synthesizer', 'N/A')}""" - - return result["response"], config_text - - # Create the advanced interface structure - with gr.Blocks(title="Advanced RAG Assistant") as interface: - # Add title and description - gr.Markdown("# ๐Ÿš€ Advanced RAG 
Assistant") - gr.Markdown("Configure and query your RAG system with advanced parameters!") - gr.Markdown("---") - - # Add database initialization section - gr.Markdown("## ๐Ÿ“ Step 1: Initialize Database") - init_btn = gr.Button("๐Ÿš€ Initialize Vector Database", variant="primary") - status_output = gr.Textbox( - label="Initialization Status", - placeholder="Click 'Initialize Vector Database' to begin...", - interactive=False, - lines=2 - ) - - gr.Markdown("---") - - # Create main layout with columns - gr.Markdown("## โš™๏ธ Configure & Query") - with gr.Row(): - with gr.Column(scale=1): - - gr.Markdown("### โš™๏ธ RAG Configuration") - - # Model selection - model_dropdown = gr.Dropdown( - choices=["gpt-4o", "gpt-4o-mini"], - value="gpt-4o-mini", - label="Model", - info="Choose the LLM model for responses" - ) - - # Temperature control - temperature_slider = gr.Slider( - minimum=0.0, - maximum=1.0, - step=0.1, - value=0.1, - label="Temperature", - info="0.0 = deterministic, 1.0 = creative" - ) - - # Chunking parameters - chunk_size_input = gr.Number( - value=512, - label="Chunk Size", - info="Size of document chunks (256-1024)", - minimum=256, - maximum=1024, - step=64 - ) - - chunk_overlap_input = gr.Number( - value=50, - label="Chunk Overlap", - info="Overlap between chunks (10-200)", - minimum=10, - maximum=200, - step=10 - ) - - # Retrieval parameters - similarity_topk_slider = gr.Slider( - minimum=1, - maximum=20, - step=1, - value=5, - label="Similarity Top-K", - info="Number of documents to retrieve (1-20)" - ) - - # Postprocessor selection - postprocessor_checkbox = gr.CheckboxGroup( - choices=["SimilarityPostprocessor"], - label="Node Postprocessors", - info="Filter and refine retrieval results" - ) - - # Similarity filtering - similarity_cutoff_slider = gr.Slider( - minimum=0.0, - maximum=1.0, - step=0.05, - value=0.3, - label="Similarity Cutoff", - info="Minimum relevance score (0.0-1.0)" - ) - - # Response synthesizer - synthesizer_dropdown = 
gr.Dropdown( - choices=["TreeSummarize", "Refine", "CompactAndRefine", "Default"], - value="Default", - label="Response Synthesizer", - info="How to combine retrieved information" - ) - - with gr.Column(scale=2): - gr.Markdown("### ๐Ÿ’ฌ Query Interface") - - # Query input - query_input = gr.Textbox( - label="Ask a question", - placeholder="e.g., What are the main topics in the documents?", - lines=3 - ) - - # Submit button - submit_btn = gr.Button("๐Ÿ” Ask Question", variant="primary") - - # Response output - response_output = gr.Textbox( - label="AI Response", - placeholder="Your response will appear here...", - interactive=False, - lines=12 - ) - - # Configuration display - config_display = gr.Textbox( - label="Configuration Used", - placeholder="Configuration details will appear here after query...", - interactive=False, - lines=8 - ) - - # Connect functions to components - init_btn.click(initialize_db, outputs=[status_output]) - - submit_btn.click( - handle_advanced_query, - inputs=[ - query_input, model_dropdown, temperature_slider, - chunk_size_input, chunk_overlap_input, similarity_topk_slider, - postprocessor_checkbox, similarity_cutoff_slider, synthesizer_dropdown - ], - outputs=[response_output, config_display] - ) - - return interface - -# Create the interface -advanced_interface = create_advanced_rag_interface() -print("โœ… Advanced RAG interface created successfully!") - diff --git a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_COMPLETE_CODE.py b/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_COMPLETE_CODE.py deleted file mode 100644 index 864bf5d..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_COMPLETE_CODE.py +++ /dev/null @@ -1,177 +0,0 @@ -# Complete code for Assignment 3b - Cell 3 (Part 3: Advanced Gradio Interface) -# Copy this code to replace the TODO section in Cell 3 - -def create_advanced_rag_interface(): - """Create advanced RAG interface with full configuration options.""" - - def initialize_db(): - """Handle 
database initialization.""" - return rag_backend.initialize_database() - - def handle_advanced_query(question, model, temperature, chunk_size, chunk_overlap, - similarity_top_k, postprocessors, similarity_cutoff, synthesizer): - """Handle advanced RAG queries with all configuration options.""" - result = rag_backend.advanced_query( - question, model, temperature, chunk_size, chunk_overlap, - similarity_top_k, postprocessors, similarity_cutoff, synthesizer - ) - - # Format configuration for display - config_text = f"""**Current Configuration:** -- Model: {result['config'].get('model', 'N/A')} -- Temperature: {result['config'].get('temperature', 'N/A')} -- Chunk Size: {result['config'].get('chunk_size', 'N/A')} -- Chunk Overlap: {result['config'].get('chunk_overlap', 'N/A')} -- Similarity Top-K: {result['config'].get('similarity_top_k', 'N/A')} -- Postprocessors: {', '.join(result['config'].get('postprocessors', []))} -- Similarity Cutoff: {result['config'].get('similarity_cutoff', 'N/A')} -- Synthesizer: {result['config'].get('synthesizer', 'N/A')}""" - - return result["response"], config_text - - # Create the advanced interface structure - with gr.Blocks(title="Advanced RAG Assistant") as interface: - # Add title and description - gr.Markdown("# ๐Ÿš€ Advanced RAG Assistant") - gr.Markdown("Configure and query your RAG system with advanced parameters!") - gr.Markdown("---") - - # Add database initialization section - gr.Markdown("## ๐Ÿ“ Step 1: Initialize Database") - init_btn = gr.Button("๐Ÿš€ Initialize Vector Database", variant="primary") - status_output = gr.Textbox( - label="Initialization Status", - placeholder="Click 'Initialize Vector Database' to begin...", - interactive=False, - lines=2 - ) - - gr.Markdown("---") - - # Create main layout with columns - gr.Markdown("## โš™๏ธ Configure & Query") - with gr.Row(): - with gr.Column(scale=1): - - gr.Markdown("### โš™๏ธ RAG Configuration") - - # Model selection - model_dropdown = gr.Dropdown( - 
choices=["gpt-4o", "gpt-4o-mini"], - value="gpt-4o-mini", - label="Model", - info="Choose the LLM model for responses" - ) - - # Temperature control - temperature_slider = gr.Slider( - minimum=0.0, - maximum=1.0, - step=0.1, - value=0.1, - label="Temperature", - info="0.0 = deterministic, 1.0 = creative" - ) - - # Chunking parameters - chunk_size_input = gr.Number( - value=512, - label="Chunk Size", - info="Size of document chunks (256-1024)", - minimum=256, - maximum=1024, - step=64 - ) - - chunk_overlap_input = gr.Number( - value=50, - label="Chunk Overlap", - info="Overlap between chunks (10-200)", - minimum=10, - maximum=200, - step=10 - ) - - # Retrieval parameters - similarity_topk_slider = gr.Slider( - minimum=1, - maximum=20, - step=1, - value=5, - label="Similarity Top-K", - info="Number of documents to retrieve (1-20)" - ) - - # Postprocessor selection - postprocessor_checkbox = gr.CheckboxGroup( - choices=["SimilarityPostprocessor"], - label="Node Postprocessors", - info="Filter and refine retrieval results" - ) - - # Similarity filtering - similarity_cutoff_slider = gr.Slider( - minimum=0.0, - maximum=1.0, - step=0.05, - value=0.3, - label="Similarity Cutoff", - info="Minimum relevance score (0.0-1.0)" - ) - - # Response synthesizer - synthesizer_dropdown = gr.Dropdown( - choices=["TreeSummarize", "Refine", "CompactAndRefine", "Default"], - value="Default", - label="Response Synthesizer", - info="How to combine retrieved information" - ) - - with gr.Column(scale=2): - gr.Markdown("### ๐Ÿ’ฌ Query Interface") - - # Query input - query_input = gr.Textbox( - label="Ask a question", - placeholder="e.g., What are the main topics in the documents?", - lines=3 - ) - - # Submit button - submit_btn = gr.Button("๐Ÿ” Ask Question", variant="primary") - - # Response output - response_output = gr.Textbox( - label="AI Response", - placeholder="Your response will appear here...", - interactive=False, - lines=12 - ) - - # Configuration display - config_display = 
gr.Textbox( - label="Configuration Used", - placeholder="Configuration details will appear here after query...", - interactive=False, - lines=8 - ) - - # Connect functions to components - init_btn.click(initialize_db, outputs=[status_output]) - - submit_btn.click( - handle_advanced_query, - inputs=[ - query_input, model_dropdown, temperature_slider, - chunk_size_input, chunk_overlap_input, similarity_topk_slider, - postprocessor_checkbox, similarity_cutoff_slider, synthesizer_dropdown - ], - outputs=[response_output, config_display] - ) - - return interface - -# Create the interface -advanced_interface = create_advanced_rag_interface() -print("โœ… Advanced RAG interface created successfully!") - diff --git a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_IMPORTS.py b/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_IMPORTS.py deleted file mode 100644 index d4ed9cf..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_3B_IMPORTS.py +++ /dev/null @@ -1,29 +0,0 @@ -# Complete code for Assignment 3b - Cell 1 (Part 1: Setup and Imports) -# Replace Cell 1 content with this: - -# Import all required libraries -import gradio as gr -import os -from pathlib import Path -from typing import Dict, List, Optional, Any - -# Load environment variables from .env file -try: - from dotenv import load_dotenv - load_dotenv() # Load .env file if it exists -except ImportError: - print("โš ๏ธ python-dotenv not installed. 
Install with: pip install python-dotenv") - -# LlamaIndex core components -from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings -from llama_index.vector_stores.lancedb import LanceDBVectorStore -from llama_index.embeddings.huggingface import HuggingFaceEmbedding -from llama_index.llms.openrouter import OpenRouter - -# Advanced RAG components -from llama_index.core.postprocessor import SimilarityPostprocessor -from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine -from llama_index.core.retrievers import VectorIndexRetriever - -print("โœ… All libraries imported successfully!") - diff --git a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_PLAN.md b/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_PLAN.md deleted file mode 100644 index 7e5d0db..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/ASSIGNMENT_PLAN.md +++ /dev/null @@ -1,708 +0,0 @@ -# ๐Ÿ“‹ Complete Assignment Completion Plan -## Day 6 Session 2 - RAG System Development - -This comprehensive plan will guide you through completing all assignments in the correct order with clear steps and helpful resources. 
- ---- - -## ๐ŸŽฏ Quick Start Checklist - -- [ ] **Step 0**: Prerequisites Setup -- [ ] **Assignment 1**: Vector Database Basics -- [ ] **Assignment 2**: Advanced RAG Techniques -- [ ] **Assignment 3a**: Basic Gradio Interface -- [ ] **Assignment 3b**: Advanced Gradio Interface - ---- - -## ๐Ÿ“ฆ STEP 0: Prerequisites & Environment Setup - -### 0.1 Install Dependencies - -```bash -# Navigate to the session folder -cd Day_6/session_2 - -# Install all required packages -pip install -r requirements.txt -``` - -**Expected Outcome**: All packages install successfully without errors - -**Common Issues**: -- If `lancedb` fails: Update pip first with `pip install --upgrade pip` -- If `llama-index` fails: Install individually: `pip install llama-index llama-index-vector-stores-lancedb` - -### 0.2 Configure API Keys (Optional but Recommended) - -For **Assignment 1**: โŒ **No API key needed** (uses local embeddings only) - -For **Assignment 2+**: โœ… **OpenRouter API key recommended** (for LLM responses) - -**Setup OpenRouter API Key** (for Assignments 2+): -1. Go to https://openrouter.ai/ -2. Create account and get API key -3. 
Set environment variable: - -```bash -# Windows (Command Prompt) -set OPENROUTER_API_KEY=your_api_key_here - -# Windows (PowerShell) -$env:OPENROUTER_API_KEY="your_api_key_here" - -# Linux/Mac -export OPENROUTER_API_KEY=your_api_key_here -``` - -**Or create `.env` file** in `session_2/` folder: -``` -OPENROUTER_API_KEY=your_api_key_here -``` - -### 0.3 Verify Installation - -Open a new Python notebook and test: - -```python -from llama_index.core import SimpleDirectoryReader -from llama_index.vector_stores.lancedb import LanceDBVectorStore -import lancedb - -print("โœ… All dependencies installed successfully!") -``` - -**Expected Outcome**: No import errors - ---- - -## ๐Ÿ“š STEP 1: Assignment 1 - Vector Database Basics - -**Time Estimate**: 45-60 minutes -**Difficulty**: Beginner -**Prerequisites**: None (basic Python knowledge) - -### 1.1 Understanding the Assignment - -**Goal**: Build a complete vector database system from scratch - -**Key Functions to Complete**: -1. `load_documents_from_folder()` - Load documents using SimpleDirectoryReader -2. `create_vector_store()` - Create LanceDB vector store -3. `create_vector_index()` - Build vector index from documents -4. 
`search_documents()` - Implement semantic search - -### 1.2 Step-by-Step Approach - -#### Step 1: Open Assignment Notebook -```bash -# Navigate to assignments folder -cd assignments -# Open: assignment_1_vector_db_basics.ipynb -``` - -#### Step 2: Read Reference Tutorial First -**๐Ÿ“– Reference**: `../llamaindex_rag/01_academic_papers_rag.ipynb` - -**What to Look For**: -- How `SimpleDirectoryReader` is used (around Cell 10-15) -- How `LanceDBVectorStore` is created -- How `StorageContext` is set up -- How `VectorStoreIndex.from_documents()` works -- How `as_retriever()` and `retrieve()` are used - -#### Step 3: Complete Function 1 - Document Loading - -**Function**: `load_documents_from_folder()` - -**Hint from Tutorial**: Look for `SimpleDirectoryReader(input_dir=..., recursive=True)` - -**Expected Pattern**: -```python -reader = SimpleDirectoryReader(input_dir=folder_path, recursive=True) -documents = reader.load_data() -return documents -``` - -**Test**: Run the test cell - should load documents from `../data` folder - -#### Step 4: Complete Function 2 - Vector Store Creation - -**Function**: `create_vector_store()` - -**Hint from Tutorial**: Look for `LanceDBVectorStore(uri=..., table_name=...)` - -**Expected Pattern**: -```python -Path(db_path).mkdir(parents=True, exist_ok=True) -vector_store = LanceDBVectorStore(uri=db_path, table_name=table_name) -return vector_store -``` - -**Test**: Run the test cell - should create vector store without errors - -#### Step 5: Complete Function 3 - Vector Index Creation - -**Function**: `create_vector_index()` - -**Hint from Tutorial**: Look for `StorageContext.from_defaults(vector_store=...)` then `VectorStoreIndex.from_documents(...)` - -**Expected Pattern**: -```python -storage_context = StorageContext.from_defaults(vector_store=vector_store) -index = VectorStoreIndex.from_documents( - documents, - storage_context=storage_context, - show_progress=True -) -return index -``` - -**Test**: This will take 1-2 minutes to 
process all documents and create embeddings - -#### Step 6: Complete Function 4 - Document Search - -**Function**: `search_documents()` - -**Hint from Tutorial**: Look for `index.as_retriever(similarity_top_k=...)` then `retriever.retrieve(query)` - -**Expected Pattern**: -```python -retriever = index.as_retriever(similarity_top_k=top_k) -results = retriever.retrieve(query) -return results -``` - -**Test**: Run with test query - should return relevant document nodes - -#### Step 7: Run Final Test Pipeline - -The assignment includes a final test cell that: -- Runs the complete pipeline -- Tests multiple search queries -- Validates all functions work together - -**Expected Output**: -``` -๐Ÿš€ Testing Complete Vector Database Pipeline -================================================== -๐Ÿ“‚ Step 1: Loading documents... - Loaded [number] documents -๐Ÿ—„๏ธ Step 2: Creating vector store... - Vector store status: โœ… Created -๐Ÿ”— Step 3: Creating vector index... - Index status: โœ… Created -๐Ÿ” Step 4: Testing search functionality... - [Multiple search results displayed] -๐ŸŽ‰ Congratulations! You've successfully completed the assignment! -``` - -### 1.3 Troubleshooting Assignment 1 - -**Issue**: "No module named 'llama_index'" -- **Solution**: Run `pip install llama-index` and restart kernel - -**Issue**: "Documents not loading" -- **Solution**: Check path - should be `../data` relative to notebook location - -**Issue**: "Vector store creation fails" -- **Solution**: Ensure `lancedb` is installed: `pip install lancedb` - -**Issue**: "Index creation is slow" -- **Solution**: Normal! 
Processing documents and creating embeddings takes 1-3 minutes - -**Issue**: "Search returns no results" -- **Solution**: Ensure index was created successfully - check previous cells - -### 1.4 Success Criteria - -โœ… All 4 functions completed without TODO comments -โœ… Documents load successfully (should see 10+ documents) -โœ… Vector store created in `./assignment_vectordb` folder -โœ… Index creation completes with progress bar -โœ… Search returns relevant results with similarity scores -โœ… Final test pipeline runs successfully - ---- - -## ๐Ÿš€ STEP 2: Assignment 2 - Advanced RAG Techniques - -**Time Estimate**: 60-90 minutes -**Difficulty**: Intermediate -**Prerequisites**: โœ… Complete Assignment 1 first - -### 2.1 Understanding the Assignment - -**Goal**: Implement advanced RAG techniques for production-quality systems - -**Key Techniques to Implement**: -1. **Similarity Postprocessor** - Filter low-relevance results -2. **TreeSummarize Engine** - Generate comprehensive responses -3. **Structured Outputs** - Create type-safe JSON responses -4. **Advanced Pipeline** - Combine all techniques - -### 2.2 Step-by-Step Approach - -#### Step 1: Verify Prerequisites - -**Before Starting**: Ensure Assignment 1 is complete and you understand: -- How vector indexes work -- How retrievers function -- Basic RAG concepts - -#### Step 2: Open Assignment Notebook -```bash -# Open: assignment_2_advanced_rag.ipynb -``` - -#### Step 3: Read Reference Tutorial -**๐Ÿ“– Reference**: `../llamaindex_rag/03_advanced_rag_techniques.ipynb` - -**Key Sections to Study**: -- SimilarityPostprocessor usage -- TreeSummarize vs Refine synthesizers -- Pydantic models for structured outputs -- RetrieverQueryEngine setup - -#### Step 4: Setup API Key (If Not Done) - -For this assignment, OpenRouter API key is **highly recommended** for full functionality. - -**Verify Setup**: -```python -import os -api_key = os.getenv("OPENROUTER_API_KEY") -print("API Key found!" 
if api_key else "API Key missing - LLM features limited") -``` - -#### Step 5: Complete Technique 1 - Similarity Postprocessor - -**Function**: Likely named `apply_similarity_filter()` or similar - -**What It Does**: Filters retrieved results below a similarity threshold - -**Hint from Tutorial**: Look for `SimilarityPostprocessor(similarity_cutoff=...)` - -**Expected Pattern**: -```python -postprocessor = SimilarityPostprocessor(similarity_cutoff=0.7) -filtered_results = postprocessor.postprocess_nodes(nodes, query=query) -return filtered_results -``` - -**Test**: Compare results before/after filtering - should see fewer but more relevant results - -#### Step 6: Complete Technique 2 - TreeSummarize Engine - -**Function**: Likely named `create_treesummarize_engine()` or similar - -**What It Does**: Creates a query engine that synthesizes comprehensive responses - -**Hint from Tutorial**: Look for `TreeSummarize()` and `RetrieverQueryEngine` - -**Expected Pattern**: -```python -retriever = index.as_retriever(similarity_top_k=top_k) -synthesizer = TreeSummarize() -query_engine = RetrieverQueryEngine( - retriever=retriever, - response_synthesizer=synthesizer -) -return query_engine -``` - -**Test**: Query should return comprehensive analysis, not just retrieved chunks - -#### Step 7: Complete Technique 3 - Structured Outputs - -**Function**: Likely named `create_structured_engine()` or similar - -**What It Does**: Creates responses in structured JSON format using Pydantic models - -**Hint from Tutorial**: Look for `PydanticOutputParser`, Pydantic models, and structured query engines - -**Expected Pattern**: -```python -class ResponseModel(BaseModel): - answer: str = Field(description="Main answer") - sources: List[str] = Field(description="Source documents") - confidence: float = Field(description="Confidence score") - -output_parser = PydanticOutputParser(ResponseModel) -# Use with query engine... 
-``` - -**Test**: Response should be valid JSON matching the Pydantic model - -#### Step 8: Complete Technique 4 - Advanced Pipeline - -**Function**: Likely named `create_advanced_pipeline()` or similar - -**What It Does**: Combines postprocessor, synthesizer, and structured outputs - -**Expected Pattern**: -```python -retriever = index.as_retriever(similarity_top_k=top_k) -postprocessors = [SimilarityPostprocessor(similarity_cutoff=0.7)] -synthesizer = TreeSummarize() -query_engine = RetrieverQueryEngine( - retriever=retriever, - node_postprocessors=postprocessors, - response_synthesizer=synthesizer -) -return query_engine -``` - -**Test**: Full pipeline should produce high-quality structured responses - -### 2.3 Troubleshooting Assignment 2 - -**Issue**: "OpenRouter API key not found" -- **Solution**: Set environment variable or create `.env` file (see Step 0.2) - -**Issue**: "TreeSummarize takes too long" -- **Solution**: Normal for first run - reduces chunk count or adjust parameters - -**Issue**: "Pydantic validation errors" -- **Solution**: Check model field definitions match actual response structure - -**Issue**: "No results after filtering" -- **Solution**: Lower similarity_cutoff value (try 0.5 instead of 0.7) - -### 2.4 Success Criteria - -โœ… Similarity postprocessor filters results correctly -โœ… TreeSummarize generates comprehensive responses -โœ… Structured outputs match Pydantic model schema -โœ… Advanced pipeline combines all techniques successfully -โœ… Responses are higher quality than basic retrieval - ---- - -## ๐ŸŽจ STEP 3: Assignment 3a - Basic Gradio Interface - -**Time Estimate**: 45-60 minutes -**Difficulty**: Intermediate -**Prerequisites**: โœ… Complete Assignments 1 & 2 - -### 3.1 Understanding the Assignment - -**Goal**: Build a simple Gradio web interface for your RAG system - -**Key Components**: -1. Database initialization button -2. Query input textbox -3. Response output display -4. 
Status messages - -### 3.2 Step-by-Step Approach - -#### Step 1: Review Gradio Basics - -If you haven't used Gradio before, review basic concepts: -- `gr.Blocks()` - Custom layouts -- `gr.Button()` - Interactive buttons -- `gr.Textbox()` - Input/output fields -- `.click()` - Event handlers - -#### Step 2: Open Assignment Notebook -```bash -# Open: assignment_3a_basic_gradio_rag.ipynb -``` - -#### Step 3: Complete Backend Functions - -**Functions to Complete**: -1. Database initialization function -2. Query processing function - -**Hint**: Reuse code from Assignments 1 & 2! - -**Expected Pattern for Init**: -```python -def init_database(): - global index, vector_store - # Load documents - documents = load_documents_from_folder("../data") - # Create vector store and index - vector_store = create_vector_store("./gradio_vectordb") - index = create_vector_index(documents, vector_store) - return "Database initialized successfully!" -``` - -**Expected Pattern for Query**: -```python -def process_query(query): - if index is None: - return "Please initialize database first!" - results = search_documents(index, query, top_k=3) - # Format and return response - return formatted_response -``` - -#### Step 4: Build Gradio Interface - -**Expected Structure**: -```python -with gr.Blocks() as interface: - gr.Markdown("# RAG System") - - with gr.Row(): - init_btn = gr.Button("Initialize Database") - status = gr.Textbox(label="Status", interactive=False) - - with gr.Row(): - query_input = gr.Textbox(label="Enter your question") - submit_btn = gr.Button("Search") - - output = gr.Textbox(label="Response", lines=10) - - init_btn.click(init_database, outputs=status) - submit_btn.click(process_query, inputs=query_input, outputs=output) - -interface.launch() -``` - -#### Step 5: Test Interface - -1. Launch interface with `.launch()` -2. Click "Initialize Database" button -3. Enter a test query -4. Click "Search" button -5. 
Verify response appears - -### 3.3 Success Criteria - -โœ… Interface launches without errors -โœ… Database initialization works -โœ… Queries process and return responses -โœ… Status messages display correctly -โœ… Interface is functional and user-friendly - ---- - -## ๐ŸŽจ STEP 4: Assignment 3b - Advanced Gradio Interface - -**Time Estimate**: 90-120 minutes -**Difficulty**: Advanced -**Prerequisites**: โœ… Complete Assignment 3a - -### 4.1 Understanding the Assignment - -**Goal**: Extend basic interface with advanced configuration options - -**Advanced Features**: -1. Model selection dropdown -2. Temperature slider -3. Chunk size/overlap configuration -4. Similarity cutoff controls -5. Response synthesizer selection -6. Dynamic parameter updates - -### 4.2 Step-by-Step Approach - -#### Step 1: Open Assignment Notebook -```bash -# Open: assignment_3b_advanced_gradio_rag.ipynb -``` - -#### Step 2: Study Reference Implementation - -Look at the README screenshot - this shows the target interface with all controls - -#### Step 3: Create Configuration Function - -**Function**: Updates RAG system with new parameters - -**Expected Pattern**: -```python -def update_config(model, temperature, chunk_size, similarity_cutoff, synthesizer): - Settings.llm = OpenRouter(model=model, temperature=temperature) - Settings.chunk_size = chunk_size - # Update other settings... 
- return f"Configuration updated: {model}, temp={temperature}" -``` - -#### Step 4: Build Advanced Interface Layout - -**Expected Components**: -```python -with gr.Blocks() as interface: - gr.Markdown("# Advanced RAG System") - - with gr.Row(): - with gr.Column(): - model_dropdown = gr.Dropdown( - choices=["gpt-4o", "gpt-4o-mini", "gpt-4o-nano"], - label="Model" - ) - temp_slider = gr.Slider(0, 1, 0.1, step=0.1, label="Temperature") - chunk_size = gr.Number(512, label="Chunk Size") - similarity_cutoff = gr.Slider(0, 1, 0.7, step=0.05, label="Similarity Cutoff") - synthesizer_dropdown = gr.Dropdown( - choices=["TreeSummarize", "Refine", "CompactAndRefine"], - label="Response Synthesizer" - ) - update_config_btn = gr.Button("Update Configuration") - - with gr.Column(): - # Query and output components - query_input = gr.Textbox(label="Query") - submit_btn = gr.Button("Search") - output = gr.Textbox(label="Response", lines=15) -``` - -#### Step 5: Wire Up All Components - -Connect all inputs to configuration and query functions - -#### Step 6: Test All Features - -Test each configuration option: -- Change model and verify behavior -- Adjust temperature and observe response changes -- Modify chunk size and check retrieval -- Test different synthesizers - -### 4.3 Success Criteria - -โœ… All configuration options work -โœ… Interface updates dynamically -โœ… Parameter changes affect responses -โœ… Interface is professional and organized -โœ… All features from 3a still work - ---- - -## ๐ŸŽ“ General Tips & Best Practices - -### Working with Tutorials - -1. **Don't Copy-Paste Directly**: Understand concepts first, then adapt -2. **Read Comments Carefully**: Tutorials have extensive explanations -3. **Compare Approaches**: Notice differences between basic and advanced techniques -4. **Test Incrementally**: Run code after each small change - -### Debugging Strategy - -1. **Read Error Messages**: They usually tell you what's wrong -2. 
**Check Variable Types**: Use `type()` and `print()` to inspect -3. **Test Functions Individually**: Don't wait until the end to test -4. **Verify Paths**: Ensure file paths are correct relative to notebook location - -### Code Organization - -1. **Use Meaningful Names**: Make variable names descriptive -2. **Add Comments**: Explain why, not just what -3. **Test Each Step**: Don't skip test cells -4. **Save Frequently**: Notebooks can crash - save your work! - -### Common Mistakes to Avoid - -โŒ **Skipping Assignments**: Each builds on the previous -โŒ **Not Reading Tutorials**: They contain essential examples -โŒ **Ignoring Error Messages**: They guide you to solutions -โŒ **Rushing Through**: Understanding > Completion -โŒ **Not Testing**: Always test after completing each function - ---- - -## ๐Ÿ“Š Progress Tracking - -Use this checklist to track your progress: - -### Assignment 1: Vector DB Basics -- [ ] Environment setup complete -- [ ] Tutorial reviewed -- [ ] Function 1: Document loading completed -- [ ] Function 2: Vector store created -- [ ] Function 3: Index creation working -- [ ] Function 4: Search implemented -- [ ] Final pipeline test passed -- [ ] **Assignment 1 Complete! โœ…** - -### Assignment 2: Advanced RAG -- [ ] Prerequisites verified (Assignment 1 done) -- [ ] API key configured -- [ ] Tutorial reviewed -- [ ] Technique 1: Postprocessor implemented -- [ ] Technique 2: TreeSummarize working -- [ ] Technique 3: Structured outputs working -- [ ] Technique 4: Advanced pipeline complete -- [ ] **Assignment 2 Complete! โœ…** - -### Assignment 3a: Basic Gradio -- [ ] Assignments 1 & 2 complete -- [ ] Backend functions completed -- [ ] Gradio interface built -- [ ] Database init working -- [ ] Query processing working -- [ ] Interface tested and functional -- [ ] **Assignment 3a Complete! 
โœ…** - -### Assignment 3b: Advanced Gradio -- [ ] Assignment 3a complete -- [ ] Configuration function implemented -- [ ] All UI components added -- [ ] All controls wired up -- [ ] Dynamic updates working -- [ ] All features tested -- [ ] **Assignment 3b Complete! โœ…** - ---- - -## ๐Ÿ†˜ Getting Help - -### If You're Stuck - -1. **Re-read the Tutorial**: Often the answer is there -2. **Check Function Signatures**: Ensure parameters match -3. **Test with Print Statements**: Debug by inspecting values -4. **Compare with Tutorial Code**: See what's different -5. **Review Error Stack Traces**: Line numbers point to issues - -### Resources - -- **LlamaIndex Docs**: https://docs.llamaindex.ai/ -- **Gradio Docs**: https://gradio.app/docs/ -- **LanceDB Docs**: https://lancedb.github.io/lancedb/ -- **Tutorial Notebooks**: `../llamaindex_rag/` folder - ---- - -## ๐ŸŽฏ Final Checklist Before Submission - -- [ ] All assignments completed -- [ ] All test cells pass -- [ ] No TODO comments remaining -- [ ] Code runs without errors -- [ ] Functions are well-commented -- [ ] You understand what each part does -- [ ] You can explain your solutions - ---- - -## ๐ŸŽ‰ Completion Reward - -Once you complete all assignments, you'll have: -- โœ… Built a complete vector database system -- โœ… Implemented advanced RAG techniques -- โœ… Created professional web interfaces -- โœ… Gained hands-on experience with production RAG systems - -**Congratulations on completing the RAG course! ๐Ÿš€** - ---- - -## ๐Ÿ“ Notes Section - -Use this space to jot down: -- Personal reminders -- Things you learned -- Questions to ask -- Ideas for extensions - ---- - -**Good luck with your assignments! You've got this! 
๐Ÿ’ช** - diff --git a/Girish_Basavaraj_Hiremath/session_2/SETUP_ENV.md b/Girish_Basavaraj_Hiremath/session_2/SETUP_ENV.md deleted file mode 100644 index 2baf0ee..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/SETUP_ENV.md +++ /dev/null @@ -1,162 +0,0 @@ -# ๐Ÿ” Setting Up .env File for API Keys - -This guide shows you how to securely store your API keys using a `.env` file. - ---- - -## ๐Ÿ“‹ Steps to Set Up - -### Step 1: Install python-dotenv - -```bash -pip install python-dotenv -``` - -Or install all requirements: -```bash -pip install -r requirements.txt -``` - -### Step 2: Create .env File - -Create a file named `.env` in the `Day_6/session_2/` folder with the following content: - -**Windows (File Explorer):** -1. Navigate to `C:\Users\gengi\OneDrive\Desktop\ai-accelerator-C2\Day_6\session_2\` -2. Right-click โ†’ New โ†’ Text Document -3. Name it `.env` (with the dot at the start) -4. If Windows hides extensions, you may need to: - - Enable "Show file extensions" in File Explorer settings - - Or use a text editor to create the file - -**Windows (Command Prompt):** -```cmd -cd C:\Users\gengi\OneDrive\Desktop\ai-accelerator-C2\Day_6\session_2 -echo OPENROUTER_API_KEY=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 > .env -``` - -**Windows (PowerShell):** -```powershell -cd "C:\Users\gengi\OneDrive\Desktop\ai-accelerator-C2\Day_6\session_2" -"OPENROUTER_API_KEY=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" | Out-File -FilePath .env -Encoding utf8 -``` - -**Using a Text Editor:** -1. Open Notepad, VS Code, or any text editor -2. Paste this content: - ``` - OPENROUTER_API_KEY=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 - ``` -3. Save the file as `.env` (with the dot at the start) in the `session_2` folder -4. 
Make sure it's saved as a plain text file (not `.env.txt`) - -### Step 3: Verify .env File - -Your `.env` file should: -- Be located at: `Day_6/session_2/.env` -- Contain exactly: - ``` - OPENROUTER_API_KEY=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 - ``` -- Have no extra spaces or quotes around the key - -### Step 4: Verify .gitignore - -I've already created a `.gitignore` file that includes `.env`, so your API key won't be committed to Git. The `.gitignore` file includes: -``` -# Environment variables - DO NOT COMMIT API KEYS! -.env -``` - ---- - -## โœ… Verification - -After creating the `.env` file: - -1. **Re-run Cell 1** in your notebook (to load dotenv) -2. **Re-run Cell 2** (to load settings) - -You should now see: -``` -โœ… OPENROUTER_API_KEY found - full advanced RAG functionality available -โœ… Advanced RAG settings configured -``` - ---- - -## ๐Ÿ”’ Security Best Practices - -โœ… **DO:** -- Keep `.env` file local (never commit to Git) -- Add `.env` to `.gitignore` (already done) -- Use different API keys for different projects -- Rotate API keys regularly - -โŒ **DON'T:** -- Commit `.env` files to version control -- Share API keys in code or documentation -- Hardcode API keys in notebooks (use `.env` instead) -- Post API keys publicly - ---- - -## ๐Ÿ› Troubleshooting - -### Issue: "python-dotenv not installed" -**Solution**: Run `pip install python-dotenv` - -### Issue: "OPENROUTER_API_KEY not found" after creating .env -**Solutions**: -1. Make sure `.env` file is in the correct location: `Day_6/session_2/.env` -2. Check the file is named exactly `.env` (not `.env.txt` or `env`) -3. Verify the content format: `OPENROUTER_API_KEY=your_key_here` (no spaces around `=`) -4. 
Restart the Jupyter kernel and re-run the cells - -### Issue: File Explorer doesn't show .env file -**Solution**: -- Enable "Show hidden files" in File Explorer settings -- Or use command line to verify the file exists - -### Issue: Windows shows "File name cannot contain: ." -**Solution**: -- Use command line (cmd or PowerShell) to create the file -- Or create it as `env` and rename it to `.env` after saving - ---- - -## ๐Ÿ“ File Structure - -After setup, your folder should look like: -``` -Day_6/session_2/ -โ”œโ”€โ”€ .env โ† Your API key (NOT in Git) -โ”œโ”€โ”€ .gitignore โ† Protects .env from being committed -โ”œโ”€โ”€ requirements.txt -โ”œโ”€โ”€ assignments/ -โ”œโ”€โ”€ data/ -โ””โ”€โ”€ ... -``` - ---- - -## ๐ŸŽฏ Alternative: Environment Variables - -If you prefer to set environment variables directly (without .env file): - -**Windows (Command Prompt):** -```cmd -set OPENROUTER_API_KEY=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 -``` - -**Windows (PowerShell):** -```powershell -$env:OPENROUTER_API_KEY="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" -``` - -**Note**: These only last for the current session. `.env` file is more convenient. - ---- - -**That's it! Your API key is now securely stored and won't be committed to Git.** ๐Ÿ” - diff --git a/Girish_Basavaraj_Hiremath/session_2/assignments/README.md b/Girish_Basavaraj_Hiremath/session_2/assignments/README.md deleted file mode 100644 index 096755e..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/assignments/README.md +++ /dev/null @@ -1,280 +0,0 @@ -# Day 6 Session 2 - RAG Assignments - -This directory contains assignments for Day 6 Session 2, focusing on building RAG (Retrieval-Augmented Generation) systems with LlamaIndex. 
- -## Assignment 1: Vector Database Creation and Retrieval - -**File:** `assignment_1_vector_db_basics.ipynb` -**Solution:** `assignment_1_solution.ipynb` - -### Objective -Learn the fundamentals of vector databases by creating a complete document indexing and retrieval system. - -### Learning Goals -- Understand document loading with `SimpleDirectoryReader` -- Learn vector store setup with LanceDB -- Implement vector index creation with `StorageContext` -- Perform semantic search and retrieval -- Use local embeddings (no OpenAI API key required) -- Configured for OpenRouter compatibility (when LLM needed) - -### What You'll Build -1. **Document Loader**: Load documents from a folder using `SimpleDirectoryReader` -2. **Vector Store**: Create a LanceDB vector store for embeddings -3. **Index Creator**: Build a vector index from documents -4. **Search Function**: Implement semantic search functionality - -### Instructions -1. Open `assignment_1_vector_db_basics.ipynb` -2. Complete each function by replacing the TODO comments -3. Run each cell after completing the function to test it -4. Refer to the existing notebooks in `llamaindex_rag/` folder for examples -5. 
Use `assignment_1_solution.ipynb` to check your answers - -### API Configuration -- โœ… **No OpenAI API key required** - uses local embeddings -- โœ… **OpenRouter ready** - configured for future LLM operations -- โœ… **Cost-effective** - all vector operations run locally - -### Key Concepts Covered -- **SimpleDirectoryReader**: Loading documents from folders -- **LanceDBVectorStore**: Vector storage with LanceDB -- **StorageContext**: Managing storage components -- **VectorStoreIndex**: Creating searchable indexes -- **Semantic Retrieval**: Finding relevant documents by meaning - -### Expected Output -After completing all functions, you should be able to: -- Load documents from the `../data` folder -- Create a vector database -- Search for documents using natural language queries -- Get relevant results with similarity scores - -### Tips -- The data folder contains diverse file types (PDFs, CSVs, Markdown, HTML, etc.) -- SimpleDirectoryReader handles multiple file formats automatically -- Use `recursive=True` to load files from subdirectories -- LanceDB provides efficient vector storage and retrieval -- The similarity scores help evaluate result relevance - -## Dataset -The assignment uses the data in `../data/` which includes: -- AI research papers (PDFs) -- Agent evaluation metrics (CSV) -- Cooking recipes (Markdown, CSV) -- Financial data (CSV, Markdown) -- Health tracking data (HTML) -- Travel guides (Markdown) -- Various images - -This diverse dataset demonstrates the multimodal capabilities of the RAG system. - -## Getting Help -If you get stuck: -1. Check the existing notebooks in `llamaindex_rag/` for examples -2. Look at the solution file for guidance -3. Review the LlamaIndex documentation -4. 
Ask for help during the session - -## Assignment 2: Advanced RAG Techniques - -**File:** `assignment_2_advanced_rag.ipynb` -**Solution:** `assignment_2_solution.ipynb` - -### Objective -Master advanced RAG techniques that transform basic document retrieval into production-ready, intelligent systems. - -### Learning Goals -- Understand and implement node postprocessors for filtering and reranking -- Learn different response synthesis strategies (TreeSummarize, Refine) -- Create structured outputs using Pydantic models -- Build advanced retrieval pipelines with multiple processing stages - -### Prerequisites -- Complete Assignment 1 first -- Understanding of basic vector databases and retrieval - -### What You'll Build -1. **Similarity Postprocessor**: Filter low-relevance results for better precision -2. **TreeSummarize Engine**: Create comprehensive analytical responses -3. **Structured Output System**: Generate type-safe JSON responses -4. **Advanced Pipeline**: Combine all techniques into production-ready system - -### Advanced Concepts Covered -- **Node Postprocessors**: `SimilarityPostprocessor` for result filtering -- **Response Synthesizers**: `TreeSummarize` for complex analysis -- **Structured Outputs**: `PydanticOutputParser` for type-safe responses -- **Advanced Pipelines**: Combining multiple techniques - -### Instructions -1. Complete Assignment 1 before starting this one -2. Open `assignment_2_advanced_rag.ipynb` -3. Complete each function by replacing the TODO comments -4. Run each cell after completing the function to test it -5. Refer to the `03_advanced_rag_techniques.ipynb` notebook for examples -6. 
Use `assignment_2_solution.ipynb` to check your answers - -### API Configuration -- โœ… **OpenRouter LLM required** - for response synthesis -- โœ… **Local embeddings** - cost-effective vector operations -- โš ๏ธ **LLM operations** - needed for advanced response synthesis - -### Expected Output -After completing all functions, you should be able to: -- Filter search results based on relevance scores -- Generate comprehensive analytical responses -- Receive structured JSON outputs instead of free text -- Compare basic vs advanced RAG performance - -### Key Benefits -- **Better Precision**: Similarity filtering removes irrelevant results -- **Comprehensive Analysis**: TreeSummarize provides deeper insights -- **Reliable Integration**: Structured outputs enable system integration -- **Production Ready**: Advanced pipelines suitable for real applications - -## Assignment 3a: Basic Gradio RAG Frontend - -**File:** `assignment_3a_basic_gradio_rag.ipynb` - -### Objective -Build a simple Gradio frontend for your RAG system with essential features only - perfect for learning Gradio fundamentals. - -### Learning Goals -- Create basic Gradio interfaces -- Connect RAG backend to frontend -- Handle user interactions and database initialization -- Build functional AI-powered web applications - -### Prerequisites -- Complete Assignments 1 & 2 -- Basic understanding of Gradio from Day 2 - -### What You'll Build -**Essential Features Only:** -1. **Initialize Database Button**: Set up vector database with one click -2. **Search Query Input**: Text input for user questions -3. **Submit Button**: Process queries and get responses -4. **Response Display**: Show AI-generated answers -5. **Status Messages**: Display initialization and error messages - -### Key Components -- `gr.Blocks()` for custom layout -- `gr.Button()` for initialization and search -- `gr.Textbox()` for input and output -- Simple event handling with `.click()` - -### Instructions -1. 
Complete Assignments 1 & 2 first -2. Open `assignment_3a_basic_gradio_rag.ipynb` -3. Follow the step-by-step implementation -4. Test your interface after each section - -### Expected Output -A simple but functional RAG web interface where users can: -- Initialize the vector database -- Ask questions and receive AI responses -- Get clear status messages - -Screenshot 2025-09-21 at 9 11 55โ€ฏAM - ---- - -## Assignment 3b: Advanced Gradio RAG Frontend - -**File:** `assignment_3b_advanced_gradio_rag.ipynb` - -### Objective -Extend your basic RAG interface with advanced configuration options to create a professional, feature-rich application. - -### Learning Goals -- Advanced Gradio components and interactions -- Dynamic RAG configuration -- Professional UI design patterns -- Parameter validation and handling -- Building production-ready AI applications - -### Prerequisites -- Complete Assignment 3a (Basic Gradio RAG) -- Understanding of RAG parameters and their effects - -### What You'll Build -**Advanced Configuration Features:** -1. **Model Selection**: Dropdown for gpt-4o, gpt-4o-mini, gpt-4o-nano -2. **Temperature Control**: Slider (0 to 1, step 0.1) -3. **Chunk Configuration**: Size and overlap inputs -4. **Similarity Top-K**: Slider for number of documents to retrieve -5. **Node Postprocessors**: Multiselect for filtering options -6. **Similarity Cutoff**: Slider for relevance filtering -7. **Response Synthesizers**: Dropdown for TreeSummarize, Refine, etc. -8. **Configuration Display**: Show current parameter settings - -### Advanced UI Components -- **Model Dropdown**: `gr.Dropdown()` with predefined options -- **Parameter Sliders**: `gr.Slider()` with custom ranges and steps -- **Multi-select**: `gr.CheckboxGroup()` for postprocessor selection -- **Number Inputs**: `gr.Number()` for chunk size/overlap -- **Professional Layout**: `gr.Row()` and `gr.Column()` for organization - -### Instructions -1. Complete Assignment 3a first (basic interface) -2. 
Open `assignment_3b_advanced_gradio_rag.ipynb` -3. Implement the advanced backend with configurable parameters -4. Build the sophisticated interface with all controls -5. Test different parameter combinations -6. Experiment with various configurations to understand their effects - -### API Configuration -- โœ… **Dynamic LLM selection** - Choose between different models -- โœ… **Configurable parameters** - Adjust all RAG settings in real-time -- โœ… **OpenRouter integration** - for multiple model access -- โœ… **Local embeddings** - cost-effective vector operations - -### Expected Output -A professional RAG interface with: -- Full parameter control and real-time configuration -- Clear display of current settings -- Professional layout and user experience -- Ability to experiment with different RAG approaches - -Screenshot 2025-09-21 at 9 39 22โ€ฏAM - - -### Key Benefits -- **Parameter Understanding**: Learn how different settings affect RAG performance -- **Production Patterns**: Build interfaces suitable for real applications -- **User Control**: Give users fine-grained control over AI behavior -- **Experimentation**: Easy A/B testing of different configurations - -### Configuration Learning -Through this assignment, you'll understand: -- **Model Selection**: When to use different GPT models -- **Temperature Effects**: How creativity vs accuracy is controlled -- **Chunking Strategy**: Impact of chunk size and overlap on retrieval -- **Filtering Techniques**: How similarity cutoffs improve precision -- **Synthesis Methods**: Different approaches to combining retrieved information - ---- - -## Assignment Solutions - -**Solution Files:** -- `assignment_1_solution.ipynb` - Vector Database Basics -- `assignment_2_solution.ipynb` - Advanced RAG Techniques -- `assignment_3a_solution.ipynb` - Basic Gradio RAG Frontend -- `assignment_3b_solution.ipynb` - Advanced Gradio RAG Frontend - -### Real-World Applications -Both assignments prepare you for building: -- **Knowledge 
Management**: Internal company document search -- **Research Assistant**: Academic paper analysis and Q&A -- **Customer Support**: Automated help desk with document knowledge -- **Educational Tools**: Interactive learning from course materials -- **Content Discovery**: Smart search through large document collections - -### Deployment Options -- **Local**: Run on your machine for development -- **Shared**: Create public links for team access -- **Cloud**: Deploy to platforms like Hugging Face Spaces -- **Enterprise**: Integrate into existing web applications - -Good luck! ๐Ÿš€ diff --git a/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_1_vector_db_basics.ipynb b/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_1_vector_db_basics.ipynb deleted file mode 100644 index 59f02ec..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_1_vector_db_basics.ipynb +++ /dev/null @@ -1,581 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assignment 1: Vector Database Creation and Retrieval\n", - "## Day 6 Session 2 - RAG Fundamentals\n", - "\n", - "**OBJECTIVE:** Create a vector database from a folder of documents and implement basic retrieval functionality.\n", - "\n", - "**LEARNING GOALS:**\n", - "- Understand document loading with SimpleDirectoryReader\n", - "- Learn vector store setup with LanceDB\n", - "- Implement vector index creation\n", - "- Perform semantic search and retrieval\n", - "\n", - "**DATASET:** Use the data folder in `Day_6/session_2/data/` which contains multiple file types\n", - "\n", - "**INSTRUCTIONS:**\n", - "1. Complete each function by replacing the TODO comments with actual implementation\n", - "2. Run each cell after completing the function to test it\n", - "3. 
The answers can be found in the existing notebooks in the `llamaindex_rag/` folder\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Libraries imported successfully!\n" - ] - } - ], - "source": [ - "# Import required libraries\n", - "import os\n", - "from pathlib import Path\n", - "from typing import List\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "\n", - "print(\"โœ… Libraries imported successfully!\")" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โ„น๏ธ OPENROUTER_API_KEY not found - that's OK for this assignment!\n", - " This assignment only uses local embeddings for vector operations.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\huggingface_hub\\file_download.py:143: UserWarning: `huggingface_hub` cache-system uses symlinks by default to efficiently store duplicated files but your machine does not support them in C:\\Users\\gengi\\AppData\\Local\\llama_index\\llama_index\\Cache\\models--BAAI--bge-small-en-v1.5. Caching files will still work but in a degraded version that might require more space on your disk. This warning can be disabled by setting the `HF_HUB_DISABLE_SYMLINKS_WARNING` environment variable. 
For more details, see https://huggingface.co/docs/huggingface_hub/how-to-cache#limitations.\n", - "To support symlinks on Windows, you either need to activate Developer Mode or to run Python as an administrator. In order to activate developer mode, see this article: https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development\n", - " warnings.warn(message)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… LlamaIndex configured with local embeddings\n", - " Using BAAI/bge-small-en-v1.5 for document embeddings\n" - ] - } - ], - "source": [ - "# Configure LlamaIndex Settings (Using OpenRouter - No OpenAI API Key needed)\n", - "def setup_llamaindex_settings():\n", - " \"\"\"\n", - " Configure LlamaIndex with local embeddings and OpenRouter for LLM.\n", - " This assignment focuses on vector database operations, so we'll use local embeddings only.\n", - " \"\"\"\n", - " # Check for OpenRouter API key (for future use, not needed for this basic assignment)\n", - " api_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " if not api_key:\n", - " print(\"โ„น๏ธ OPENROUTER_API_KEY not found - that's OK for this assignment!\")\n", - " print(\" This assignment only uses local embeddings for vector operations.\")\n", - " \n", - " # Configure local embeddings (no API key required)\n", - " Settings.embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\",\n", - " trust_remote_code=True\n", - " )\n", - " \n", - " print(\"โœ… LlamaIndex configured with local embeddings\")\n", - " print(\" Using BAAI/bge-small-en-v1.5 for document embeddings\")\n", - "\n", - "# Setup the configuration\n", - "setup_llamaindex_settings()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. 
Document Loading Function\n", - "\n", - "Complete the function below to load documents from a folder using `SimpleDirectoryReader`.\n", - "\n", - "**Note:** This assignment uses local embeddings only - no OpenAI API key required! We're configured to use OpenRouter for future LLM operations.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 139M/139M [01:58<00:00, 1.23MiB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\ai_agents.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\in_the_end.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\whisper\\transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\rags.mp3 with error: [WinError 2] The system cannot find the file specified. 
Skipping...\n", - "Loaded 39 documents\n" - ] - } - ], - "source": [ - "def load_documents_from_folder(folder_path: str):\n", - " \"\"\"\n", - " Load documents from a folder using SimpleDirectoryReader.\n", - " \n", - " TODO: Complete this function to load documents from the given folder path.\n", - " HINT: Use SimpleDirectoryReader with recursive parameter to load all files\n", - " \n", - " Args:\n", - " folder_path (str): Path to the folder containing documents\n", - " \n", - " Returns:\n", - " List of documents loaded from the folder\n", - " \"\"\"\n", - " # Create SimpleDirectoryReader instance\n", - " reader = SimpleDirectoryReader(input_dir=folder_path, recursive=True)\n", - " \n", - " # Load and return documents\n", - " documents = reader.load_data()\n", - " \n", - " return documents\n", - "\n", - "# Test the function after you complete it\n", - "test_folder = \"../data\"\n", - "documents = load_documents_from_folder(test_folder)\n", - "print(f\"Loaded {len(documents)} documents\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Vector Store Creation Function\n", - "\n", - "Complete the function below to create a LanceDB vector store.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 13:52:16,133 - WARNING - Table documents doesn't exist yet. 
Please add some data to create it.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Vector store created: True\n" - ] - } - ], - "source": [ - "def create_vector_store(db_path: str = \"./vectordb\", table_name: str = \"documents\"):\n", - " \"\"\"\n", - " Create a LanceDB vector store for storing document embeddings.\n", - " \n", - " TODO: Complete this function to create and configure a LanceDB vector store.\n", - " HINT: Use LanceDBVectorStore with uri and table_name parameters\n", - " \n", - " Args:\n", - " db_path (str): Path where the vector database will be stored\n", - " table_name (str): Name of the table in the vector database\n", - " \n", - " Returns:\n", - " LanceDBVectorStore: Configured vector store\n", - " \"\"\"\n", - " # Create the directory if it doesn't exist\n", - " Path(db_path).mkdir(parents=True, exist_ok=True)\n", - " \n", - " # Create vector store\n", - " vector_store = LanceDBVectorStore(uri=db_path, table_name=table_name)\n", - " \n", - " return vector_store\n", - "\n", - "# Test the function after you complete it\n", - "vector_store = create_vector_store(\"./assignment_vectordb\")\n", - "print(f\"Vector store created: {vector_store is not None}\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. 
Vector Index Creation Function\n", - "\n", - "Complete the function below to create a vector index from documents.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Parsing nodes: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 39/39 [00:09<00:00, 4.20it/s]\n", - "Generating embeddings: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 52/52 [00:20<00:00, 2.53it/s]\n", - "2025-11-02 13:52:56,491 - INFO - Create new table documents adding data.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Vector index created: True\n" - ] - } - ], - "source": [ - "def create_vector_index(documents: List, vector_store):\n", - " \"\"\"\n", - " Create a vector index from documents using the provided vector store.\n", - " \n", - " TODO: Complete this function to create a VectorStoreIndex from documents.\n", - " HINT: Create StorageContext with vector_store, then use VectorStoreIndex.from_documents()\n", - " \n", - " Args:\n", - " documents: List of documents to index\n", - " vector_store: LanceDB vector store to use for storage\n", - " \n", - " Returns:\n", - " VectorStoreIndex: The created vector index\n", - " \"\"\"\n", - " # Create storage context with vector store\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " \n", - " # Create index from documents\n", - " index = VectorStoreIndex.from_documents(\n", - " documents,\n", - " storage_context=storage_context,\n", - " show_progress=True\n", - " )\n", - " \n", - " return index\n", - "\n", - "# Test the function after you complete it (will only work after previous functions are completed)\n", - "if documents and vector_store:\n", - " index = create_vector_index(documents, vector_store)\n", - " print(f\"Vector index created: {index is not None}\")\n", - "else:\n", - " print(\"Complete previous functions first to test this one\")\n" - ] - }, - { - "cell_type": "markdown", - 
"metadata": {}, - "source": [ - "## 4. Document Search Function\n", - "\n", - "Complete the function below to search for relevant documents using the vector index.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 13:53:10,611 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found 2 results for query: 'What are AI agents?'\n", - "Result 1: THE LANDSCAPE OF EMERGING AI AGENT ARCHITECTURES\n", - "FOR REASONING , PLANNING , AND TOOL CALLING : A S U...\n", - "Result 2: agent-personas or the user is not needed, multi-agent architectures tend to thrive more when collabo...\n" - ] - } - ], - "source": [ - "def search_documents(index, query: str, top_k: int = 3):\n", - " \"\"\"\n", - " Search for relevant documents using the vector index.\n", - " \n", - " TODO: Complete this function to perform semantic search on the index.\n", - " HINT: Use index.as_retriever() with similarity_top_k parameter, then retrieve(query)\n", - " \n", - " Args:\n", - " index: Vector index to search\n", - " query (str): Search query\n", - " top_k (int): Number of top results to return\n", - " \n", - " Returns:\n", - " List of retrieved document nodes\n", - " \"\"\"\n", - " # Create retriever from index\n", - " retriever = index.as_retriever(similarity_top_k=top_k)\n", - " \n", - " # Retrieve documents for the query\n", - " results = retriever.retrieve(query)\n", - " \n", - " return results\n", - "\n", - "# Test the function after you complete it (will only work after all previous functions are completed)\n", - "if 'index' in locals() and index is not None:\n", - " test_query = \"What are AI agents?\"\n", - " results = search_documents(index, test_query, top_k=2)\n", - " print(f\"Found {len(results)} results for query: '{test_query}'\")\n", - " for i, result in enumerate(results, 1):\n", - " print(f\"Result {i}: 
{result.text[:100] if hasattr(result, 'text') else 'No text'}...\")\n", - "else:\n", - " print(\"Complete all previous functions first to test this one\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. Final Test - Complete Pipeline\n", - "\n", - "Once you've completed all the functions above, run this cell to test the complete pipeline with multiple search queries.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ Testing Complete Vector Database Pipeline\n", - "==================================================\n", - "\n", - "๐Ÿ“‚ Step 1: Loading documents...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\ai_agents.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\in_the_end.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\rags.mp3 with error: [WinError 2] The system cannot find the file specified. 
Skipping...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\whisper\\transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Loaded 39 documents\n", - "\n", - "๐Ÿ—„๏ธ Step 2: Creating vector store...\n", - " Vector store status: โœ… Created\n", - "\n", - "๐Ÿ”— Step 3: Creating vector index...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Parsing nodes: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 39/39 [00:00<00:00, 324.04it/s]\n", - "Generating embeddings: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 52/52 [00:17<00:00, 3.01it/s]\n", - "2025-11-02 13:53:50,179 - INFO - query_type :, vector\n", - "2025-11-02 13:53:50,261 - INFO - query_type :, vector\n", - "2025-11-02 13:53:50,330 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Index status: โœ… Created\n", - "\n", - "๐Ÿ” Step 4: Testing search functionality...\n", - "\n", - " ๐Ÿ”Ž Query: 'What are AI agents?'\n", - " 1. THE LANDSCAPE OF EMERGING AI AGENT ARCHITECTURES\n", - "FOR REASONING , PLANNING , AND TOOL CALLING : A S U... (Score: 0.6220)\n", - "\n", - " ๐Ÿ”Ž Query: 'How to evaluate agent performance?'\n", - " 1. steps, but the answers are limited to Yes/No responses [7]. As the industry continues to pivot towar... (Score: 0.6772)\n", - "\n", - " ๐Ÿ”Ž Query: 'Italian recipes and cooking'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 13:53:50,406 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " 1. # ๐Ÿ Classic Spaghetti Carbonara Recipe\n", - "\n", - "## Ingredients\n", - "- 400g spaghetti pasta\n", - "- 4 large egg yolks\n", - "- ... 
(Score: 0.6188)\n", - "\n", - " ๐Ÿ”Ž Query: 'Financial analysis and investment'\n", - " 1. However, several important considerations need to be ad-\n", - "dressed in future work:\n", - "โ€ข Scalability: Eval... (Score: 0.5613)\n", - "\n", - "==================================================\n", - "๐ŸŽฏ Assignment Status:\n", - " Documents loaded: โœ…\n", - " Vector store created: โœ…\n", - " Index created: โœ…\n", - " Search working: โœ…\n", - "\n", - "๐ŸŽ‰ Congratulations! You've successfully completed the assignment!\n", - " You've built a complete vector database with search functionality!\n" - ] - } - ], - "source": [ - "# Final test of the complete pipeline\n", - "print(\"๐Ÿš€ Testing Complete Vector Database Pipeline\")\n", - "print(\"=\" * 50)\n", - "\n", - "# Re-run the complete pipeline to ensure everything works\n", - "data_folder = \"../data\"\n", - "vector_db_path = \"./assignment_vectordb\"\n", - "\n", - "# Step 1: Load documents\n", - "print(\"\\n๐Ÿ“‚ Step 1: Loading documents...\")\n", - "documents = load_documents_from_folder(data_folder)\n", - "print(f\" Loaded {len(documents)} documents\")\n", - "\n", - "# Step 2: Create vector store\n", - "print(\"\\n๐Ÿ—„๏ธ Step 2: Creating vector store...\")\n", - "vector_store = create_vector_store(vector_db_path)\n", - "print(\" Vector store status:\", \"โœ… Created\" if vector_store else \"โŒ Failed\")\n", - "\n", - "# Step 3: Create vector index\n", - "print(\"\\n๐Ÿ”— Step 3: Creating vector index...\")\n", - "if documents and vector_store:\n", - " index = create_vector_index(documents, vector_store)\n", - " print(\" Index status:\", \"โœ… Created\" if index else \"โŒ Failed\")\n", - "else:\n", - " index = None\n", - " print(\" โŒ Cannot create index - missing documents or vector store\")\n", - "\n", - "# Step 4: Test multiple search queries\n", - "print(\"\\n๐Ÿ” Step 4: Testing search functionality...\")\n", - "if index:\n", - " search_queries = [\n", - " \"What are AI agents?\",\n", - " \"How to 
evaluate agent performance?\", \n", - " \"Italian recipes and cooking\",\n", - " \"Financial analysis and investment\"\n", - " ]\n", - " \n", - " for query in search_queries:\n", - " print(f\"\\n ๐Ÿ”Ž Query: '{query}'\")\n", - " results = search_documents(index, query, top_k=2)\n", - " \n", - " if results:\n", - " for i, result in enumerate(results, 1):\n", - " text_preview = result.text[:100] if hasattr(result, 'text') else \"No text available\"\n", - " score = f\" (Score: {result.score:.4f})\" if hasattr(result, 'score') else \"\"\n", - " print(f\" {i}. {text_preview}...{score}\")\n", - " else:\n", - " print(\" No results found\")\n", - "else:\n", - " print(\" โŒ Cannot test search - index not created\")\n", - "\n", - "print(\"\\n\" + \"=\" * 50)\n", - "print(\"๐ŸŽฏ Assignment Status:\")\n", - "print(f\" Documents loaded: {'โœ…' if documents else 'โŒ'}\")\n", - "print(f\" Vector store created: {'โœ…' if vector_store else 'โŒ'}\")\n", - "print(f\" Index created: {'โœ…' if index else 'โŒ'}\")\n", - "print(f\" Search working: {'โœ…' if index else 'โŒ'}\")\n", - "\n", - "if documents and vector_store and index:\n", - " print(\"\\n๐ŸŽ‰ Congratulations! 
You've successfully completed the assignment!\")\n", - " print(\" You've built a complete vector database with search functionality!\")\n", - "else:\n", - " print(\"\\n๐Ÿ“ Please complete the TODO functions above to finish the assignment.\")\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_2_advanced_rag.ipynb b/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_2_advanced_rag.ipynb deleted file mode 100644 index 36a8069..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_2_advanced_rag.ipynb +++ /dev/null @@ -1,997 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assignment 2: Advanced RAG Techniques\n", - "## Day 6 Session 2 - Advanced RAG Fundamentals\n", - "\n", - "**OBJECTIVE:** Implement advanced RAG techniques including postprocessors, response synthesizers, and structured outputs.\n", - "\n", - "**LEARNING GOALS:**\n", - "- Understand and implement node postprocessors for filtering and reranking\n", - "- Learn different response synthesis strategies (TreeSummarize, Refine)\n", - "- Create structured outputs using Pydantic models\n", - "- Build advanced retrieval pipelines with multiple processing stages\n", - "\n", - "**DATASET:** Use the same data folder as Assignment 1 (`Day_6/session_2/data/`)\n", - "\n", - "**PREREQUISITES:** Complete Assignment 1 first\n", - "\n", - "**INSTRUCTIONS:**\n", - "1. Complete each function by replacing the TODO comments with actual implementation\n", - "2. 
Run each cell after completing the function to test it\n", - "3. The answers can be found in the `03_advanced_rag_techniques.ipynb` notebook\n", - "4. Each technique builds on the previous one\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Advanced RAG libraries imported successfully!\n" - ] - } - ], - "source": [ - "# Import required libraries for advanced RAG\n", - "import os\n", - "from pathlib import Path\n", - "from typing import Dict, List, Optional, Any\n", - "from pydantic import BaseModel, Field\n", - "\n", - "# Load environment variables from .env file\n", - "try:\n", - " from dotenv import load_dotenv\n", - " load_dotenv() # Load .env file if it exists\n", - "except ImportError:\n", - " print(\"โš ๏ธ python-dotenv not installed. 
Install with: pip install python-dotenv\")\n", - "\n", - "# Core LlamaIndex components\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n", - "from llama_index.core.query_engine import RetrieverQueryEngine\n", - "from llama_index.core.retrievers import VectorIndexRetriever\n", - "\n", - "# Vector store\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "\n", - "# Embeddings and LLM\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "from llama_index.llms.openrouter import OpenRouter\n", - "\n", - "# Advanced RAG components (we'll use these in the assignments)\n", - "from llama_index.core.postprocessor import SimilarityPostprocessor\n", - "from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n", - "from llama_index.core.output_parsers import PydanticOutputParser\n", - "\n", - "print(\"โœ… Advanced RAG libraries imported successfully!\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:09:39,604 - INFO - Load pretrained SentenceTransformer: BAAI/bge-small-en-v1.5\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… OPENROUTER_API_KEY found - full advanced RAG functionality available\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:09:43,555 - INFO - 1 prompt is loaded, with the key: query\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Advanced RAG settings configured\n", - " - Chunk size: 512 (optimized for precision)\n", - " - Using local embeddings for cost efficiency\n", - " - OpenRouter LLM ready for response synthesis\n" - ] - } - ], - "source": [ - "# Configure Advanced RAG Settings (Using OpenRouter)\n", - "def setup_advanced_rag_settings():\n", - " \"\"\"\n", - " Configure LlamaIndex 
with optimized settings for advanced RAG.\n", - " Uses local embeddings and OpenRouter for LLM operations.\n", - " \"\"\"\n", - " # Check for OpenRouter API key (from .env file or environment)\n", - " api_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " \n", - " if not api_key:\n", - " print(\"โš ๏ธ OPENROUTER_API_KEY not found!\")\n", - " print(\" Please create a .env file in the session_2 folder with:\")\n", - " print(\" OPENROUTER_API_KEY=your_api_key_here\")\n", - " print(\" OR set it as an environment variable\")\n", - " print(\"\\n LLM operations will be limited\")\n", - " print(\" You can still complete postprocessor and retrieval exercises\")\n", - " return\n", - " else:\n", - " print(\"โœ… OPENROUTER_API_KEY found - full advanced RAG functionality available\")\n", - " \n", - " # Configure OpenRouter LLM\n", - " Settings.llm = OpenRouter(\n", - " api_key=api_key,\n", - " model=\"gpt-4o\",\n", - " temperature=0.1 # Lower temperature for more consistent responses\n", - " )\n", - " \n", - " # Configure local embeddings (no API key required)\n", - " Settings.embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\",\n", - " trust_remote_code=True\n", - " )\n", - " \n", - " # Advanced RAG configuration\n", - " Settings.chunk_size = 512 # Smaller chunks for better precision\n", - " Settings.chunk_overlap = 50\n", - " \n", - " print(\"โœ… Advanced RAG settings configured\")\n", - " print(\" - Chunk size: 512 (optimized for precision)\")\n", - " print(\" - Using local embeddings for cost efficiency\")\n", - " print(\" - OpenRouter LLM ready for response synthesis\")\n", - "\n", - "# Setup the configuration\n", - "setup_advanced_rag_settings()\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "API Key found!\n" - ] - } - ], - "source": [ - "import os\n", - "from dotenv import load_dotenv\n", - "load_dotenv()\n", - "print(\"API Key 
found!\" if os.getenv(\"OPENROUTER_API_KEY\") else \"API Key NOT found!\")" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“ Setting up basic index for advanced RAG...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\ai_agents.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\in_the_end.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\rags.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\whisper\\transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n", - "Parsing nodes: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 39/39 [00:00<00:00, 274.01it/s]\n", - "Generating embeddings: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 92/92 [00:26<00:00, 3.51it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Basic index created with 39 documents\n", - " Ready for advanced RAG techniques!\n", - "๐Ÿš€ Ready to implement advanced RAG techniques!\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "# Setup: Create index from Assignment 1 (reuse the basic functionality)\n", - "def setup_basic_index(data_folder: str = \"../data\", force_rebuild: bool = False):\n", - " \"\"\"\n", - " Create a basic vector index that we'll enhance with advanced 
techniques.\n", - " This reuses the concepts from Assignment 1.\n", - " \"\"\"\n", - " # Create vector store\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./advanced_rag_vectordb\",\n", - " table_name=\"documents\"\n", - " )\n", - " \n", - " # Load documents\n", - " if not Path(data_folder).exists():\n", - " print(f\"โŒ Data folder not found: {data_folder}\")\n", - " return None\n", - " \n", - " reader = SimpleDirectoryReader(input_dir=data_folder, recursive=True)\n", - " documents = reader.load_data()\n", - " \n", - " # Create storage context and index\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context,\n", - " show_progress=True\n", - " )\n", - " \n", - " print(f\"โœ… Basic index created with {len(documents)} documents\")\n", - " print(\" Ready for advanced RAG techniques!\")\n", - " return index\n", - "\n", - "# Create the basic index\n", - "print(\"๐Ÿ“ Setting up basic index for advanced RAG...\")\n", - "index = setup_basic_index()\n", - "\n", - "if index:\n", - " print(\"๐Ÿš€ Ready to implement advanced RAG techniques!\")\n", - "else:\n", - " print(\"โŒ Failed to create index - check data folder path\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. Node Postprocessors - Similarity Filtering\n", - "\n", - "**Concept:** Postprocessors refine retrieval results after the initial vector search. The `SimilarityPostprocessor` filters out chunks that fall below a relevance threshold.\n", - "\n", - "**Why it matters:** Raw vector search often returns some irrelevant results. 
Filtering improves precision and response quality.\n", - "\n", - "Complete the function below to create a query engine with similarity filtering.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:06:35,010 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Query engine with similarity filtering created\n", - "\n", - "๐Ÿ” Testing query: 'What are the benefits of AI agents?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:06:39,716 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 14:06:42,470 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 14:06:44,703 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“ Response: AI agents offer several benefits, including the ability to tackle complex problems through reasoning, planning, and tool utilization. They can autonomously engage with external environments, make decisions, and assist humans in various tasks. By incorporating reasoning and planning, agents can adapt their strategies based on new information, ensuring effective decision-making in uncertain situations. Additionally, agents can use different tools to interact with external data sources, enhancing their problem-solving capabilities. 
Both single-agent and multi-agent systems excel at managing intricate tasks, with multi-agent systems providing advantages in scenarios that demand collaboration and parallel task execution.\n" - ] - } - ], - "source": [ - "def create_query_engine_with_similarity_filter(index, similarity_cutoff: float = 0.3, top_k: int = 10):\n", - " \"\"\"\n", - " Create a query engine that filters results based on similarity scores.\n", - " \n", - " TODO: Complete this function to create a query engine with similarity postprocessing.\n", - " HINT: Use index.as_query_engine() with node_postprocessors parameter containing SimilarityPostprocessor\n", - " \n", - " Args:\n", - " index: Vector index to query\n", - " similarity_cutoff: Minimum similarity score (0.0 to 1.0)\n", - " top_k: Number of initial results to retrieve before filtering\n", - " \n", - " Returns:\n", - " Query engine with similarity filtering\n", - " \"\"\"\n", - " # Create similarity postprocessor with the cutoff threshold\n", - " similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)\n", - " \n", - " # Create query engine with similarity filtering\n", - " query_engine = index.as_query_engine(\n", - " similarity_top_k=top_k,\n", - " node_postprocessors=[similarity_processor]\n", - " )\n", - " \n", - " return query_engine\n", - "\n", - "# Test the function\n", - "if index:\n", - " filtered_engine = create_query_engine_with_similarity_filter(index, similarity_cutoff=0.3)\n", - " \n", - " if filtered_engine:\n", - " print(\"โœ… Query engine with similarity filtering created\")\n", - " \n", - " # Test query\n", - " test_query = \"What are the benefits of AI agents?\"\n", - " print(f\"\\n๐Ÿ” Testing query: '{test_query}'\")\n", - " \n", - " # Test the query\n", - " response = filtered_engine.query(test_query)\n", - " print(f\"๐Ÿ“ Response: {response}\")\n", - " else:\n", - " print(\"โŒ Failed to create filtered query engine\")\n", - "else:\n", - " print(\"โŒ No index available - run 
previous cells first\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Response Synthesizers - TreeSummarize\n", - "\n", - "**Concept:** Response synthesizers control how retrieved information becomes final answers. `TreeSummarize` builds responses hierarchically, ideal for complex analytical questions.\n", - "\n", - "**Why it matters:** Different synthesis strategies work better for different query types. TreeSummarize excels at comprehensive analysis and long-form responses.\n", - "\n", - "Complete the function below to create a query engine with TreeSummarize response synthesis.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:11:22,888 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Query engine with TreeSummarize created\n", - "\n", - "๐Ÿ” Testing analytical query: 'Compare the advantages and disadvantages of different AI agent frameworks'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:11:24,584 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“ TreeSummarize Response:\n", - "Different AI agent frameworks offer various advantages and disadvantages based on their design and application.\n", - "\n", - "Advantages:\n", - "1. **Rapid Development and Deployment**: Frameworks like Agno and CrewAI facilitate quick development and deployment of robust AI systems, particularly in sectors like finance.\n", - "2. **Dynamic and Autonomous Capabilities**: Architectures that leverage dynamic teams and autonomous agents are effective across diverse benchmarks and problem types. They can adapt to changing needs by bringing agents in and out of the system as required.\n", - "3. 
**Enhanced Performance**: Both single and multi-agent patterns show strong performance in complex tasks involving reasoning and tool execution. Multi-agent systems, in particular, benefit from having clear leaders, defined planning phases, and dynamic teams with specific skills.\n", - "\n", - "Disadvantages:\n", - "1. **Evaluation Challenges**: There is a lack of standardized benchmarks for evaluating AI agents, making it difficult to compare different implementations. Many benchmarks are unique to specific research teams and involve complex, manually scored evaluations.\n", - "2. **Real-World Applicability and Bias**: Current AI-driven agents face challenges in real-world applicability and in mitigating biases inherent in language models.\n", - "3. **Complexity in Multi-Agent Systems**: While multi-agent systems can be highly effective, they require careful coordination and communication strategies to avoid unproductive interactions and\n" - ] - } - ], - "source": [ - "def create_query_engine_with_tree_summarize(index, top_k: int = 5):\n", - " \"\"\"\n", - " Create a query engine that uses TreeSummarize for comprehensive responses.\n", - " \n", - " TODO: Complete this function to create a query engine with TreeSummarize synthesis.\n", - " HINT: Create a TreeSummarize instance, then use index.as_query_engine() with response_synthesizer parameter\n", - " \n", - " Args:\n", - " index: Vector index to query\n", - " top_k: Number of results to retrieve\n", - " \n", - " Returns:\n", - " Query engine with TreeSummarize synthesis\n", - " \"\"\"\n", - " # Create TreeSummarize response synthesizer\n", - " tree_synthesizer = TreeSummarize()\n", - " \n", - " # Create query engine with the synthesizer\n", - " query_engine = index.as_query_engine(\n", - " similarity_top_k=top_k,\n", - " response_synthesizer=tree_synthesizer\n", - " )\n", - " \n", - " return query_engine\n", - "\n", - "# Test the function\n", - "if index:\n", - " tree_engine = 
create_query_engine_with_tree_summarize(index)\n", - " \n", - " if tree_engine:\n", - " print(\"โœ… Query engine with TreeSummarize created\")\n", - " \n", - " # Test with a complex analytical query\n", - " analytical_query = \"Compare the advantages and disadvantages of different AI agent frameworks\"\n", - " print(f\"\\n๐Ÿ” Testing analytical query: '{analytical_query}'\")\n", - " \n", - " # Test the query\n", - " response = tree_engine.query(analytical_query)\n", - " print(f\"๐Ÿ“ TreeSummarize Response:\\n{response}\")\n", - " else:\n", - " print(\"โŒ Failed to create TreeSummarize query engine\")\n", - "else:\n", - " print(\"โŒ No index available - run previous cells first\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Structured Outputs with Pydantic Models\n", - "\n", - "**Concept:** Structured outputs ensure predictable, parseable responses using Pydantic models. This is essential for API endpoints and data pipelines.\n", - "\n", - "**Why it matters:** Instead of free-text responses, you get type-safe, validated data structures that applications can reliably process.\n", - "\n", - "Complete the function below to create a structured output system for extracting research paper information.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:11:35,908 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Structured output program created\n", - "\n", - "๐Ÿ” Testing structured query: 'Tell me about AI agents and their capabilities'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:11:37,199 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“Š Structured Response:\n", - 
"title='AI Agents and Their Capabilities' key_points=['AI-driven agents show promise but have limitations and areas for improvement.', 'Challenges include comprehensive benchmarks, real-world applicability, and mitigating harmful biases.', 'Dynamic teams of agents can be effective, with agents brought in based on need.', 'Single and multi-agent patterns perform well on complex tasks with reasoning and tool execution.', 'Agent architectures with clear leadership, planning phases, and dynamic teams improve performance.'] applications=['AI-driven agents can be used in dynamic team settings for task planning and execution.', 'Single agent patterns are effective for tasks requiring defined personas and iterative feedback.', 'Multi-agent systems can collaborate on complex goals with intelligent message filtering.'] summary='AI agents are evolving from static language models to dynamic, autonomous systems. While they show strong performance in various tasks, challenges remain in evaluation, reliability, and bias mitigation. 
Effective agent architectures often include dynamic teams and clear planning strategies.'\n", - "\n", - "๐Ÿ’ก Expected output format:\n", - " - title: String\n", - " - key_points: List of strings\n", - " - applications: List of strings\n", - " - summary: String\n" - ] - } - ], - "source": [ - "# First, define the Pydantic models for structured outputs \n", - "class ResearchPaperInfo(BaseModel):\n", - " \"\"\"Structured information about a research paper or AI concept.\"\"\"\n", - " title: str = Field(description=\"The main title or concept name\")\n", - " key_points: List[str] = Field(description=\"3-5 main points or findings\")\n", - " applications: List[str] = Field(description=\"Practical applications or use cases\")\n", - " summary: str = Field(description=\"Brief 2-3 sentence summary\")\n", - "\n", - "# Import the missing component\n", - "from llama_index.core.program import LLMTextCompletionProgram\n", - "\n", - "def create_structured_output_program(output_model: BaseModel = ResearchPaperInfo):\n", - " \"\"\"\n", - " Create a structured output program using Pydantic models.\n", - " \n", - " TODO: Complete this function to create a structured output program.\n", - " HINT: Use LLMTextCompletionProgram.from_defaults() with PydanticOutputParser and a prompt template\n", - " \n", - " Args:\n", - " output_model: Pydantic model class for structured output\n", - " \n", - " Returns:\n", - " LLMTextCompletionProgram that returns structured data\n", - " \"\"\"\n", - " # Create output parser with the Pydantic model\n", - " output_parser = PydanticOutputParser(output_model)\n", - " \n", - " # Create the structured output program\n", - " program = LLMTextCompletionProgram.from_defaults(\n", - " output_parser=output_parser,\n", - " prompt_template_str=(\n", - " \"Extract structured information from the following context:\\n\"\n", - " \"{context}\\n\\n\"\n", - " \"Question: {query}\\n\\n\"\n", - " \"Provide the information in the specified JSON format.\"\n", - " ),\n", - " 
verbose=True\n", - " )\n", - "\n", - " return program\n", - "\n", - "# Test the function\n", - "if index:\n", - " structured_program = create_structured_output_program(ResearchPaperInfo)\n", - " \n", - " if structured_program:\n", - " print(\"โœ… Structured output program created\")\n", - " \n", - " # Test with retrieval and structured extraction\n", - " structure_query = \"Tell me about AI agents and their capabilities\"\n", - " print(f\"\\n๐Ÿ” Testing structured query: '{structure_query}'\")\n", - " \n", - " # Get context for structured extraction\n", - " retriever = VectorIndexRetriever(index=index, similarity_top_k=3)\n", - " nodes = retriever.retrieve(structure_query)\n", - " context = \"\\n\".join([node.text for node in nodes])\n", - " \n", - " # Get structured output\n", - " response = structured_program(context=context, query=structure_query)\n", - " print(f\"๐Ÿ“Š Structured Response:\\n{response}\")\n", - " \n", - " print(\"\\n๐Ÿ’ก Expected output format:\")\n", - " print(\" - title: String\")\n", - " print(\" - key_points: List of strings\")\n", - " print(\" - applications: List of strings\") \n", - " print(\" - summary: String\")\n", - " else:\n", - " print(\"โŒ Failed to create structured output program\")\n", - "else:\n", - " print(\"โŒ No index available - run previous cells first\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. 
Advanced Pipeline - Combining All Techniques\n", - "\n", - "**Concept:** Combine multiple advanced techniques into a single powerful query engine: similarity filtering + response synthesis + structured output.\n", - "\n", - "**Why it matters:** Production RAG systems often need multiple techniques working together for optimal results.\n", - "\n", - "Complete the function below to create a comprehensive advanced RAG pipeline.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:12:19,878 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Advanced RAG pipeline created successfully!\n", - " ๐Ÿ”ง Similarity filtering: โœ…\n", - " ๐ŸŒณ TreeSummarize synthesis: โœ…\n", - "\n", - "๐Ÿ” Testing complex query: 'Analyze the current state and future potential of AI agent technologies'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:12:20,886 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ Advanced RAG Response:\n", - "The current state of AI agent technologies is promising, with advancements in their ability to achieve complex goals through enhanced reasoning, planning, and tool execution capabilities. These technologies are effective across various benchmarks and problem types, particularly when employing single or multi-agent architectures. Single-agent systems perform well with defined roles and iterative feedback, while multi-agent systems benefit from dynamic team structures and intelligent communication strategies.\n", - "\n", - "However, there are notable limitations that need addressing for future improvements. 
Challenges include the development of comprehensive benchmarks for evaluating agents, ensuring real-world applicability, and mitigating biases inherent in language models. Future potential lies in overcoming these challenges, which would enable more reliable and robust AI agents. Additionally, the integration of structured communication and information-sharing mechanisms, as seen in systems like MetaGPT, could further enhance the performance and efficiency of multi-agent architectures. Overall, the progression from static models to dynamic, autonomous agents offers significant opportunities for innovation in AI agent design and implementation.\n", - "\n", - "๐ŸŽฏ This should provide:\n", - " - Filtered relevant results only\n", - " - Comprehensive analytical response\n", - " - Combined postprocessing and synthesis\n" - ] - } - ], - "source": [ - "def create_advanced_rag_pipeline(index, similarity_cutoff: float = 0.3, top_k: int = 10):\n", - " \"\"\"\n", - " Create a comprehensive advanced RAG pipeline combining multiple techniques.\n", - " \n", - " TODO: Complete this function to create the ultimate advanced RAG query engine.\n", - " HINT: Combine SimilarityPostprocessor + TreeSummarize using index.as_query_engine()\n", - " \n", - " Args:\n", - " index: Vector index to query\n", - " similarity_cutoff: Minimum similarity score for filtering\n", - " top_k: Number of initial results to retrieve\n", - " \n", - " Returns:\n", - " Advanced query engine with filtering and synthesis combined\n", - " \"\"\"\n", - " # Create similarity postprocessor\n", - " similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)\n", - " \n", - " # Create TreeSummarize for comprehensive responses\n", - " tree_synthesizer = TreeSummarize()\n", - " \n", - " # Create the comprehensive query engine combining both techniques\n", - " advanced_engine = index.as_query_engine(\n", - " similarity_top_k=top_k,\n", - " node_postprocessors=[similarity_processor],\n", - " 
response_synthesizer=tree_synthesizer\n", - " )\n", - " \n", - " return advanced_engine\n", - "\n", - "# Test the comprehensive pipeline\n", - "if index:\n", - " advanced_pipeline = create_advanced_rag_pipeline(index)\n", - " \n", - " if advanced_pipeline:\n", - " print(\"โœ… Advanced RAG pipeline created successfully!\")\n", - " print(\" ๐Ÿ”ง Similarity filtering: โœ…\")\n", - " print(\" ๐ŸŒณ TreeSummarize synthesis: โœ…\")\n", - " \n", - " # Test with complex query\n", - " complex_query = \"Analyze the current state and future potential of AI agent technologies\"\n", - " print(f\"\\n๐Ÿ” Testing complex query: '{complex_query}'\")\n", - " \n", - " # Test the pipeline\n", - " response = advanced_pipeline.query(complex_query)\n", - " print(f\"๐Ÿš€ Advanced RAG Response:\\n{response}\")\n", - " \n", - " print(\"\\n๐ŸŽฏ This should provide:\")\n", - " print(\" - Filtered relevant results only\")\n", - " print(\" - Comprehensive analytical response\")\n", - " print(\" - Combined postprocessing and synthesis\")\n", - " else:\n", - " print(\"โŒ Failed to create advanced RAG pipeline\")\n", - "else:\n", - " print(\"โŒ No index available - run previous cells first\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. 
Final Test - Compare Basic vs Advanced RAG\n", - "\n", - "Once you've completed all the functions above, run this cell to compare basic RAG with your advanced techniques.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:12:46,537 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ Advanced RAG Techniques Assignment - Final Test\n", - "============================================================\n", - "\n", - "๐Ÿ“Š Component Status:\n", - " โœ… Basic Index\n", - " โœ… Similarity Filter\n", - " โœ… TreeSummarize\n", - " โœ… Structured Output\n", - " โœ… Advanced Pipeline\n", - "\n", - "๐Ÿ” Creating basic query engine for comparison...\n", - "\n", - "============================================================\n", - "๐Ÿ†š COMPARISON: Basic vs Advanced RAG\n", - "============================================================\n", - "\n", - "๐Ÿ“‹ Test Query 1: 'What are the key capabilities of AI agents?'\n", - "--------------------------------------------------\n", - "๐Ÿ”น Basic RAG:\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:12:47,507 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 14:12:48,692 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Response: AI agents exhibit several key capabilities, including strong performance on complex tasks that involve reasoning and tool execution. 
They can operate effectively as single agents with defined personas...\n", - "\n", - "๐Ÿ”ธ Advanced RAG:\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:12:49,357 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 14:12:50,626 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Response: AI agents are designed to extend the capabilities of language models to solve real-world challenges. Key capabilities include robust problem-solving skills, reasoning, planning, and the ability to call tools that interact with external environments. These capabilities enable agents to perform well on novel tasks by reasoning over multiple steps and accessing outside information. Additionally, effective agent architectures often involve dynamic teams with specific skills relevant to current tasks, intelligent message filtering, and opportunities for plan refinement as new information is learned.\n", - "\n", - "๐Ÿ“‹ Test Query 2: 'How do you evaluate agent performance metrics?'\n", - "--------------------------------------------------\n", - "๐Ÿ”น Basic RAG:\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:12:51,652 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 14:12:53,427 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Response: Evaluating agent performance metrics involves using both objective and subjective measures. Objective metrics include success rate, output similarity to human responses, and overall efficiency. 
These ...\n", - "\n", - "๐Ÿ”ธ Advanced RAG:\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:12:54,140 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 14:12:57,513 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Response: Evaluating agent performance metrics involves using both objective and subjective measures. Objective metrics include success rate, output similarity to human responses, and overall efficiency. These metrics provide a quantitative assessment of an agent's capabilities. Subjective measures, which are equally important, focus on aspects like the efficiency of tool use, reliability, and robustness of planning. These often require evaluation by human experts, which can be more costly and time-consuming. Additionally, benchmarks like AgentBench and SmartPlay are used to evaluate agents in various environments, providing insights into their ability to generalize and perform tasks involving reasoning, planning, and tool usage. 
Real-world benchmarks, such as WildBench, also play a role by using data from actual conversations to assess performance across a wide range of tasks.\n", - "\n", - "๐Ÿ“‹ Test Query 3: 'Explain the benefits and challenges of multimodal AI systems'\n", - "--------------------------------------------------\n", - "๐Ÿ”น Basic RAG:\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:12:58,531 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 14:12:59,779 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Response: Multimodal AI systems offer several benefits, including the ability to process and integrate information from multiple sources, such as text, images, and audio, which can lead to more comprehensive un...\n", - "\n", - "๐Ÿ”ธ Advanced RAG:\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-11-02 14:13:00,446 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Response: Multimodal AI systems offer several benefits, including enhanced understanding and interaction capabilities by integrating multiple types of data such as text, images, and audio. This integration allows for more comprehensive analysis and decision-making, as the systems can draw on diverse data sources to improve accuracy and context-awareness. Additionally, multimodal systems can provide more natural and intuitive user interactions, as they can process and respond to inputs in various forms, similar to human communication.\n", - "\n", - "However, these systems also face challenges. One significant challenge is the complexity of effectively integrating and processing different data modalities, which often require distinct processing techniques and models. 
Ensuring seamless interaction between these modalities can be technically demanding. Another challenge is the increased computational resources required to handle and analyze the diverse data types, which can impact the system's efficiency and scalability. Furthermore, developing robust evaluation benchmarks for multimodal systems can be difficult, as it involves assessing performance across multiple data types and tasks.\n", - "\n", - "============================================================\n", - "๐ŸŽฏ Assignment Status:\n", - " Completed: 5/5 components\n", - "\n", - "๐ŸŽ‰ Congratulations! You've mastered Advanced RAG Techniques!\n", - " โœ… Node postprocessors for result filtering\n", - " โœ… Response synthesizers for better answers\n", - " โœ… Structured outputs for reliable data\n", - " โœ… Advanced pipelines combining all techniques\n", - "\n", - "๐Ÿš€ You're ready for production RAG systems!\n", - "\n", - "๐Ÿ’ก Key learnings:\n", - " - Postprocessors improve result relevance and precision\n", - " - Different synthesizers work better for different query types\n", - " - Structured outputs enable reliable system integration\n", - " - Advanced techniques can be combined for production systems\n" - ] - } - ], - "source": [ - "# Final comparison: Basic vs Advanced RAG\n", - "print(\"๐Ÿš€ Advanced RAG Techniques Assignment - Final Test\")\n", - "print(\"=\" * 60)\n", - "\n", - "# Test queries for comparison\n", - "test_queries = [\n", - " \"What are the key capabilities of AI agents?\",\n", - " \"How do you evaluate agent performance metrics?\",\n", - " \"Explain the benefits and challenges of multimodal AI systems\"\n", - "]\n", - "\n", - "# Check if all components were created\n", - "components_status = {\n", - " \"Basic Index\": index is not None,\n", - " \"Similarity Filter\": 'filtered_engine' in locals() and filtered_engine is not None,\n", - " \"TreeSummarize\": 'tree_engine' in locals() and tree_engine is not None,\n", - " \"Structured Output\": 
'structured_program' in locals() and structured_program is not None,\n", - " \"Advanced Pipeline\": 'advanced_pipeline' in locals() and advanced_pipeline is not None\n", - "}\n", - "\n", - "print(\"\\n๐Ÿ“Š Component Status:\")\n", - "for component, status in components_status.items():\n", - " status_icon = \"โœ…\" if status else \"โŒ\"\n", - " print(f\" {status_icon} {component}\")\n", - "\n", - "# Create basic query engine for comparison\n", - "if index:\n", - " print(\"\\n๐Ÿ” Creating basic query engine for comparison...\")\n", - " basic_engine = index.as_query_engine(similarity_top_k=5)\n", - " \n", - " print(\"\\n\" + \"=\" * 60)\n", - " print(\"๐Ÿ†š COMPARISON: Basic vs Advanced RAG\")\n", - " print(\"=\" * 60)\n", - " \n", - " for i, query in enumerate(test_queries, 1):\n", - " print(f\"\\n๐Ÿ“‹ Test Query {i}: '{query}'\")\n", - " print(\"-\" * 50)\n", - " \n", - " # Basic RAG\n", - " print(\"๐Ÿ”น Basic RAG:\")\n", - " if basic_engine:\n", - " basic_response = basic_engine.query(query)\n", - " print(f\" Response: {str(basic_response)[:200]}...\")\n", - " \n", - " # Advanced RAG (if implemented)\n", - " print(\"\\n๐Ÿ”ธ Advanced RAG:\")\n", - " if components_status[\"Advanced Pipeline\"]:\n", - " advanced_response = advanced_pipeline.query(query)\n", - " print(f\" Response: {advanced_response}\")\n", - " else:\n", - " print(\" Complete the advanced pipeline function to test\")\n", - "\n", - "# Final status\n", - "print(\"\\n\" + \"=\" * 60)\n", - "print(\"๐ŸŽฏ Assignment Status:\")\n", - "completed_count = sum(components_status.values())\n", - "total_count = len(components_status)\n", - "\n", - "print(f\" Completed: {completed_count}/{total_count} components\")\n", - "\n", - "if completed_count == total_count:\n", - " print(\"\\n๐ŸŽ‰ Congratulations! 
You've mastered Advanced RAG Techniques!\")\n", - " print(\" โœ… Node postprocessors for result filtering\")\n", - " print(\" โœ… Response synthesizers for better answers\")\n", - " print(\" โœ… Structured outputs for reliable data\")\n", - " print(\" โœ… Advanced pipelines combining all techniques\")\n", - " print(\"\\n๐Ÿš€ You're ready for production RAG systems!\")\n", - "else:\n", - " missing = total_count - completed_count\n", - " print(f\"\\n๐Ÿ“ Complete {missing} more components to finish the assignment:\")\n", - " for component, status in components_status.items():\n", - " if not status:\n", - " print(f\" - {component}\")\n", - "\n", - "print(\"\\n๐Ÿ’ก Key learnings:\")\n", - "print(\" - Postprocessors improve result relevance and precision\")\n", - "print(\" - Different synthesizers work better for different query types\")\n", - "print(\" - Structured outputs enable reliable system integration\")\n", - "print(\" - Advanced techniques can be combined for production systems\")\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_3a_basic_gradio_rag.ipynb b/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_3a_basic_gradio_rag.ipynb deleted file mode 100644 index 62e94f3..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_3a_basic_gradio_rag.ipynb +++ /dev/null @@ -1,434 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assignment 3a: Basic Gradio RAG Frontend\n", - "## Day 6 Session 2 - Building Simple RAG Applications\n", - "\n", - "In this 
assignment, you'll build a simple Gradio frontend for your RAG system with just the essential features:\n", - "- Button to initialize the vector database\n", - "- Search query input and button\n", - "- Display of AI responses\n", - "\n", - "**Learning Objectives:**\n", - "- Create basic Gradio interfaces\n", - "- Connect RAG backend to frontend\n", - "- Handle user interactions and database initialization\n", - "- Build functional AI-powered web applications\n", - "\n", - "**Prerequisites:**\n", - "- Completed Assignment 1 (Vector Database Basics)\n", - "- Completed Assignment 2 (Advanced RAG)\n", - "- Understanding of LlamaIndex fundamentals\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿ“š Part 1: Setup and Imports\n", - "\n", - "Import all necessary libraries for building your Gradio RAG application.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… All libraries imported successfully!\n" - ] - } - ], - "source": [ - "# Import required libraries\n", - "import gradio as gr\n", - "import os\n", - "from pathlib import Path\n", - "\n", - "# LlamaIndex components\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "from llama_index.llms.openrouter import OpenRouter\n", - "\n", - "print(\"โœ… All libraries imported successfully!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿค– Part 2: RAG Backend Class\n", - "\n", - "Create a simple RAG backend that can initialize the database and answer queries.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ RAG Backend initialized and ready!\n" - ] - } - ], - "source": [ - "class SimpleRAGBackend:\n", - " \"\"\"Simple RAG backend for Gradio frontend.\"\"\"\n", - " \n", - " def __init__(self):\n", - " self.index = None\n", - " self.setup_settings()\n", - " \n", - " def setup_settings(self):\n", - " \"\"\"Configure LlamaIndex settings.\"\"\"\n", - " # Set up the LLM using OpenRouter\n", - " api_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " if api_key:\n", - " Settings.llm = OpenRouter(\n", - " api_key=api_key,\n", - " model=\"gpt-4o\",\n", - " temperature=0.1\n", - " )\n", - " \n", - " # Set up the embedding model\n", - " Settings.embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\",\n", - " trust_remote_code=True\n", - " )\n", - " \n", - " # Set chunking parameters\n", - " Settings.chunk_size = 
512\n", - " Settings.chunk_overlap = 50\n", - " \n", - " def initialize_database(self, data_folder=\"../data\"):\n", - " \"\"\"Initialize the vector database with documents.\"\"\"\n", - " # Check if data folder exists\n", - " if not Path(data_folder).exists():\n", - " return f\"โŒ Data folder '{data_folder}' not found!\"\n", - " \n", - " try:\n", - " # Create vector store\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./basic_rag_vectordb\",\n", - " table_name=\"documents\"\n", - " )\n", - " \n", - " # Load documents\n", - " reader = SimpleDirectoryReader(input_dir=data_folder, recursive=True)\n", - " documents = reader.load_data()\n", - " \n", - " # Create storage context and index\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " self.index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context,\n", - " show_progress=True\n", - " )\n", - " \n", - " return f\"โœ… Database initialized successfully with {len(documents)} documents!\"\n", - " \n", - " except Exception as e:\n", - " return f\"โŒ Error initializing database: {str(e)}\"\n", - " \n", - " def query(self, question):\n", - " \"\"\"Query the RAG system and return response.\"\"\"\n", - " # Check if index exists\n", - " if self.index is None:\n", - " return \"โŒ Please initialize the database first!\"\n", - " \n", - " # Check if question is empty\n", - " if not question or not question.strip():\n", - " return \"โš ๏ธ Please enter a question first!\"\n", - " \n", - " try:\n", - " # Create query engine and get response\n", - " query_engine = self.index.as_query_engine()\n", - " response = query_engine.query(question)\n", - " return str(response)\n", - " \n", - " except Exception as e:\n", - " return f\"โŒ Error processing query: {str(e)}\"\n", - "\n", - "# Initialize the backend\n", - "rag_backend = SimpleRAGBackend()\n", - "print(\"๐Ÿš€ RAG Backend initialized and ready!\")\n" - ] - }, - { - "cell_type": "markdown", - 
"metadata": {}, - "source": [ - "## ๐ŸŽจ Part 3: Gradio Interface\n", - "\n", - "Create a simple Gradio interface with:\n", - "1. Button to initialize the database\n", - "2. Text input for queries\n", - "3. Button to submit queries\n", - "4. Text output for responses\n", - "5. Text output for status messages\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Basic RAG interface created successfully!\n" - ] - } - ], - "source": [ - "def create_basic_rag_interface():\n", - " \"\"\"Create basic RAG interface with essential features.\"\"\"\n", - " \n", - " def initialize_db():\n", - " \"\"\"Handle database initialization.\"\"\"\n", - " return rag_backend.initialize_database()\n", - " \n", - " def handle_query(question):\n", - " \"\"\"Handle user queries.\"\"\"\n", - " return rag_backend.query(question)\n", - " \n", - " # Create Gradio interface using gr.Blocks()\n", - " with gr.Blocks(title=\"Basic RAG Assistant\") as interface:\n", - " # Add title and description\n", - " gr.Markdown(\"# ๐Ÿค– Basic RAG Assistant\")\n", - " gr.Markdown(\"Ask questions about your documents using AI-powered retrieval-augmented generation!\")\n", - " gr.Markdown(\"---\")\n", - " \n", - " # Add initialization section\n", - " gr.Markdown(\"## ๐Ÿ“ Step 1: Initialize Database\")\n", - " gr.Markdown(\"First, click the button below to load and index your documents.\")\n", - " \n", - " init_btn = gr.Button(\"๐Ÿš€ Initialize Database\", variant=\"primary\")\n", - " status_output = gr.Textbox(\n", - " label=\"Status\", \n", - " placeholder=\"Click 'Initialize Database' to begin...\",\n", - " interactive=False,\n", - " lines=2\n", - " )\n", - " \n", - " gr.Markdown(\"---\")\n", - " \n", - " # Add query section \n", - " gr.Markdown(\"## ๐Ÿ’ฌ Step 2: Ask Questions\")\n", - " gr.Markdown(\"Once the database is initialized, you can ask questions below.\")\n", - " \n", - " query_input = 
gr.Textbox(\n", - " label=\"Enter your question\",\n", - " placeholder=\"e.g., What are the main topics in the documents?\",\n", - " lines=3\n", - " )\n", - " \n", - " submit_btn = gr.Button(\"๐Ÿ” Ask Question\", variant=\"primary\")\n", - " \n", - " response_output = gr.Textbox(\n", - " label=\"AI Response\",\n", - " placeholder=\"Your response will appear here...\",\n", - " interactive=False,\n", - " lines=10\n", - " )\n", - " \n", - " # Connect buttons to functions\n", - " init_btn.click(initialize_db, outputs=[status_output])\n", - " submit_btn.click(handle_query, inputs=[query_input], outputs=[response_output])\n", - " \n", - " return interface\n", - "\n", - "# Create the interface\n", - "basic_interface = create_basic_rag_interface()\n", - "print(\"โœ… Basic RAG interface created successfully!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿš€ Part 4: Launch Your Application\n", - "\n", - "Launch your Gradio application and test it!\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐ŸŽ‰ Launching your Basic RAG Assistant...\n", - "๐Ÿ”— Your application will open in a new browser tab!\n", - "\n", - "๐Ÿ“‹ Testing Instructions:\n", - "1. Click 'Initialize Database' button first\n", - "2. Wait for success message\n", - "3. Enter a question in the query box\n", - "4. Click 'Ask Question' to get AI response\n", - "\n", - "๐Ÿ’ก Example questions to try:\n", - "- What are the main topics in the documents?\n", - "- Summarize the key findings\n", - "- Explain the methodology used\n", - "\n", - "๐Ÿš€ Launch your app:\n", - "* Running on local URL: http://127.0.0.1:7860\n", - "* To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Table documents doesn't exist yet. Please add some data to create it.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\ai_agents.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\in_the_end.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\rags.mp3 with error: [WinError 2] The system cannot find the file specified. 
Skipping...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\whisper\\transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n", - "Parsing nodes: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 39/39 [00:00<00:00, 64.96it/s]\n", - "Generating embeddings: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 92/92 [00:26<00:00, 3.47it/s]\n", - "2025-11-02 15:08:16,242 - INFO - Create new table documents adding data.\n", - "2025-11-02 15:27:19,691 - INFO - query_type :, vector\n", - "2025-11-02 15:27:21,925 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 15:27:36,118 - INFO - query_type :, vector\n", - "2025-11-02 15:27:37,756 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 15:27:53,106 - INFO - query_type :, vector\n", - "2025-11-02 15:27:54,668 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 15:28:07,848 - INFO - query_type :, vector\n", - "2025-11-02 15:28:08,847 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - } - ], - "source": [ - "print(\"๐ŸŽ‰ Launching your Basic RAG Assistant...\")\n", - "print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n", - "print(\"\")\n", - "print(\"๐Ÿ“‹ Testing Instructions:\")\n", - "print(\"1. Click 'Initialize Database' button first\")\n", - "print(\"2. Wait for success message\")\n", - "print(\"3. Enter a question in the query box\")\n", - "print(\"4. 
Click 'Ask Question' to get AI response\")\n", - "print(\"\")\n", - "print(\"๐Ÿ’ก Example questions to try:\")\n", - "print(\"- What are the main topics in the documents?\")\n", - "print(\"- Summarize the key findings\")\n", - "print(\"- Explain the methodology used\")\n", - "print(\"\")\n", - "print(\"๐Ÿš€ Launch your app:\")\n", - "\n", - "# Launch the interface\n", - "basic_interface.launch()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## โœ… Assignment Completion Checklist\n", - "\n", - "Before submitting, ensure you have:\n", - "\n", - "- [x] RAG backend is provided and working\n", - "- [ ] Created Gradio interface with required components:\n", - " - [ ] Title and description using gr.Markdown()\n", - " - [ ] Initialize database button using gr.Button()\n", - " - [ ] Status output using gr.Textbox()\n", - " - [ ] Query input field using gr.Textbox()\n", - " - [ ] Submit query button using gr.Button()\n", - " - [ ] Response output area using gr.Textbox()\n", - "- [ ] Connected buttons to backend functions using .click()\n", - "- [ ] Successfully launched the application\n", - "- [ ] Tested the full workflow (initialize โ†’ query โ†’ response)\n", - "\n", - "## ๐ŸŽŠ Congratulations!\n", - "\n", - "You've successfully built your first Gradio RAG application! 
You now have:\n", - "\n", - "- A functional web interface for your RAG system\n", - "- Understanding of Gradio basics and component connections\n", - "- A foundation for building more complex AI applications\n", - "\n", - "**Next Steps**: Complete Assignment 3b to add advanced configuration options to your RAG interface!\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_3b_advanced_gradio_rag.ipynb b/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_3b_advanced_gradio_rag.ipynb deleted file mode 100644 index e1a0656..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/assignments/assignment_3b_advanced_gradio_rag.ipynb +++ /dev/null @@ -1,724 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assignment 3b: Advanced Gradio RAG Frontend\n", - "## Day 6 Session 2 - Building Configurable RAG Applications\n", - "\n", - "In this assignment, you'll extend your basic RAG interface with advanced configuration options to create a professional, feature-rich RAG application.\n", - "\n", - "**New Features to Add:**\n", - "- Model selection dropdown (gpt-4o, gpt-4o-mini)\n", - "- Temperature slider (0 to 1 with 0.1 intervals)\n", - "- Chunk size configuration\n", - "- Chunk overlap configuration \n", - "- Similarity top-k slider\n", - "- Node postprocessor multiselect\n", - "- Similarity cutoff slider\n", - "- Response synthesizer multiselect\n", - "\n", - "**Learning Objectives:**\n", - "- Advanced Gradio components and interactions\n", - "- Dynamic RAG configuration\n", - "- 
Professional UI design patterns\n", - "- Parameter validation and handling\n", - "- Building production-ready AI applications\n", - "\n", - "**Prerequisites:**\n", - "- Completed Assignment 3a (Basic Gradio RAG)\n", - "- Understanding of RAG parameters and their effects\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿ“š Part 1: Setup and Imports\n", - "\n", - "Import all necessary libraries including advanced RAG components for configuration options.\n", - "\n", - "**Note:** This assignment uses OpenRouter for LLM access (not OpenAI). Make sure you have your `OPENROUTER_API_KEY` environment variable set.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… All libraries imported successfully!\n" - ] - } - ], - "source": [ - "# Import all required libraries\n", - "import gradio as gr\n", - "import os\n", - "from pathlib import Path\n", - "from typing import Dict, List, Optional, Any\n", - "\n", - "# LlamaIndex core components\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "from llama_index.llms.openrouter import OpenRouter\n", - "\n", - "# Advanced RAG components\n", - "from llama_index.core.postprocessor import SimilarityPostprocessor\n", - "from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n", - "from llama_index.core.retrievers import VectorIndexRetriever\n", - "\n", - "print(\"โœ… All libraries imported successfully!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿค– Part 2: Advanced RAG Backend Class\n", - "\n", - "Create an advanced RAG backend that supports dynamic configuration of all parameters.\n" - ] - }, - { - "cell_type": 
"code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ Advanced RAG Backend initialized and ready!\n" - ] - } - ], - "source": [ - "class AdvancedRAGBackend:\n", - " \"\"\"Advanced RAG backend with configurable parameters.\"\"\"\n", - " \n", - " def __init__(self):\n", - " self.index = None\n", - " self.available_models = [\"gpt-4o\", \"gpt-4o-mini\"]\n", - " self.available_postprocessors = [\"SimilarityPostprocessor\"]\n", - " self.available_synthesizers = [\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"]\n", - " self.update_settings()\n", - " \n", - " def update_settings(self, model: str = \"gpt-4o-mini\", temperature: float = 0.1, chunk_size: int = 512, chunk_overlap: int = 50):\n", - " \"\"\"Update LlamaIndex settings based on user configuration.\"\"\"\n", - " # Set up the LLM using OpenRouter\n", - " api_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " if api_key:\n", - " Settings.llm = OpenRouter(\n", - " api_key=api_key,\n", - " model=model,\n", - " temperature=temperature\n", - " )\n", - " \n", - " # Set up the embedding model (keep this constant)\n", - " Settings.embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\",\n", - " trust_remote_code=True\n", - " )\n", - " \n", - " # Set chunking parameters from function parameters\n", - " Settings.chunk_size = chunk_size\n", - " Settings.chunk_overlap = chunk_overlap\n", - " \n", - " def initialize_database(self, data_folder=\"../data\"):\n", - " \"\"\"Initialize the vector database with documents.\"\"\"\n", - " # Check if data folder exists\n", - " if not Path(data_folder).exists():\n", - " return f\"โŒ Data folder '{data_folder}' not found!\"\n", - " \n", - " try:\n", - " # Create vector store\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./advanced_rag_vectordb\",\n", - " table_name=\"documents\"\n", - " )\n", - " \n", - " # Load documents\n", - " reader = 
SimpleDirectoryReader(input_dir=data_folder, recursive=True)\n", - " documents = reader.load_data()\n", - " \n", - " # Create storage context and index\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " self.index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context,\n", - " show_progress=True\n", - " )\n", - " \n", - " return f\"โœ… Database initialized successfully with {len(documents)} documents!\"\n", - " \n", - " except Exception as e:\n", - " return f\"โŒ Error initializing database: {str(e)}\"\n", - " \n", - " def get_postprocessor(self, postprocessor_name: str, similarity_cutoff: float):\n", - " \"\"\"Get the selected postprocessor.\"\"\"\n", - " if postprocessor_name == \"SimilarityPostprocessor\":\n", - " return SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)\n", - " elif postprocessor_name == \"None\":\n", - " return None\n", - " else:\n", - " return None\n", - " \n", - " def get_synthesizer(self, synthesizer_name: str):\n", - " \"\"\"Get the selected response synthesizer.\"\"\"\n", - " if synthesizer_name == \"TreeSummarize\":\n", - " return TreeSummarize()\n", - " elif synthesizer_name == \"Refine\":\n", - " return Refine()\n", - " elif synthesizer_name == \"CompactAndRefine\":\n", - " return CompactAndRefine()\n", - " elif synthesizer_name == \"Default\":\n", - " return None\n", - " else:\n", - " return None\n", - " \n", - " def advanced_query(self, question: str, model: str, temperature: float, \n", - " chunk_size: int, chunk_overlap: int, similarity_top_k: int,\n", - " postprocessor_names: List[str], similarity_cutoff: float,\n", - " synthesizer_name: str) -> Dict[str, Any]:\n", - " \"\"\"Query the RAG system with advanced configuration.\"\"\"\n", - " \n", - " # Check if index exists\n", - " if self.index is None:\n", - " return {\"response\": \"โŒ Please initialize the database first!\", \"sources\": [], \"config\": {}}\n", - " \n", - " # Check if 
question is empty\n", - " if not question or not question.strip():\n", - " return {\"response\": \"โš ๏ธ Please enter a question first!\", \"sources\": [], \"config\": {}}\n", - " \n", - " try:\n", - " # Update settings with new parameters\n", - " self.update_settings(model, temperature, chunk_size, chunk_overlap)\n", - " \n", - " # Get postprocessors\n", - " postprocessors = []\n", - " for name in postprocessor_names:\n", - " processor = self.get_postprocessor(name, similarity_cutoff)\n", - " if processor is not None:\n", - " postprocessors.append(processor)\n", - " \n", - " # Get synthesizer\n", - " synthesizer = self.get_synthesizer(synthesizer_name)\n", - " \n", - " # Create query engine with all parameters\n", - " query_engine_kwargs = {\"similarity_top_k\": similarity_top_k}\n", - " if postprocessors:\n", - " query_engine_kwargs[\"node_postprocessors\"] = postprocessors\n", - " if synthesizer is not None:\n", - " query_engine_kwargs[\"response_synthesizer\"] = synthesizer\n", - " \n", - " query_engine = self.index.as_query_engine(**query_engine_kwargs)\n", - " \n", - " # Query and get response\n", - " response = query_engine.query(question)\n", - " \n", - " # Extract source information if available\n", - " sources = []\n", - " if hasattr(response, 'source_nodes'):\n", - " for node in response.source_nodes:\n", - " sources.append({\n", - " \"text\": node.text[:200] + \"...\",\n", - " \"score\": getattr(node, 'score', 0.0),\n", - " \"source\": getattr(node.node, 'metadata', {}).get('file_name', 'Unknown')\n", - " })\n", - " \n", - " return {\n", - " \"response\": str(response),\n", - " \"sources\": sources,\n", - " \"config\": {\n", - " \"model\": model,\n", - " \"temperature\": temperature,\n", - " \"chunk_size\": chunk_size,\n", - " \"chunk_overlap\": chunk_overlap,\n", - " \"similarity_top_k\": similarity_top_k,\n", - " \"postprocessors\": postprocessor_names,\n", - " \"similarity_cutoff\": similarity_cutoff,\n", - " \"synthesizer\": synthesizer_name\n", - 
" }\n", - " }\n", - " \n", - " except Exception as e:\n", - " return {\"response\": f\"โŒ Error processing query: {str(e)}\", \"sources\": [], \"config\": {}}\n", - "\n", - "# Initialize the backend\n", - "rag_backend = AdvancedRAGBackend()\n", - "print(\"๐Ÿš€ Advanced RAG Backend initialized and ready!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐ŸŽจ Part 3: Advanced Gradio Interface\n", - "\n", - "Create a sophisticated Gradio interface with all the configuration options specified:\n", - "1. Database initialization button\n", - "2. Search query input and button \n", - "3. Model selection dropdown\n", - "4. Temperature slider\n", - "5. Chunk size and overlap inputs\n", - "6. Similarity top-k slider\n", - "7. Node postprocessor multiselect\n", - "8. Similarity cutoff slider\n", - "9. Response synthesizer multiselect\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Advanced RAG interface created successfully!\n" - ] - } - ], - "source": [ - "# ============================================================\n", - "# COMPLETE CODE FOR ASSIGNMENT 3B - CELL 3\n", - "# Copy and paste this entire code into Cell 3 of your notebook\n", - "# ============================================================\n", - "\n", - "def create_advanced_rag_interface():\n", - " \"\"\"Create advanced RAG interface with full configuration options.\"\"\"\n", - " \n", - " def initialize_db():\n", - " \"\"\"Handle database initialization.\"\"\"\n", - " return rag_backend.initialize_database()\n", - " \n", - " def handle_advanced_query(question, model, temperature, chunk_size, chunk_overlap, \n", - " similarity_top_k, postprocessors, similarity_cutoff, synthesizer):\n", - " \"\"\"Handle advanced RAG queries with all configuration options.\"\"\"\n", - " result = rag_backend.advanced_query(\n", - " question, model, temperature, chunk_size, 
chunk_overlap,\n", - " similarity_top_k, postprocessors, similarity_cutoff, synthesizer\n", - " )\n", - " \n", - " # Format configuration for display\n", - " config_text = f\"\"\"**Current Configuration:**\n", - "- Model: {result['config'].get('model', 'N/A')}\n", - "- Temperature: {result['config'].get('temperature', 'N/A')}\n", - "- Chunk Size: {result['config'].get('chunk_size', 'N/A')}\n", - "- Chunk Overlap: {result['config'].get('chunk_overlap', 'N/A')}\n", - "- Similarity Top-K: {result['config'].get('similarity_top_k', 'N/A')}\n", - "- Postprocessors: {', '.join(result['config'].get('postprocessors', []))}\n", - "- Similarity Cutoff: {result['config'].get('similarity_cutoff', 'N/A')}\n", - "- Synthesizer: {result['config'].get('synthesizer', 'N/A')}\"\"\"\n", - " \n", - " return result[\"response\"], config_text\n", - " \n", - " # Create the advanced interface structure\n", - " with gr.Blocks(title=\"Advanced RAG Assistant\") as interface:\n", - " # Add title and description\n", - " gr.Markdown(\"# ๐Ÿš€ Advanced RAG Assistant\")\n", - " gr.Markdown(\"Configure and query your RAG system with advanced parameters!\")\n", - " gr.Markdown(\"---\")\n", - " \n", - " # Add database initialization section\n", - " gr.Markdown(\"## ๐Ÿ“ Step 1: Initialize Database\")\n", - " init_btn = gr.Button(\"๐Ÿš€ Initialize Vector Database\", variant=\"primary\")\n", - " status_output = gr.Textbox(\n", - " label=\"Initialization Status\",\n", - " placeholder=\"Click 'Initialize Vector Database' to begin...\",\n", - " interactive=False,\n", - " lines=2\n", - " )\n", - " \n", - " gr.Markdown(\"---\")\n", - " \n", - " # Create main layout with columns\n", - " gr.Markdown(\"## โš™๏ธ Configure & Query\")\n", - " with gr.Row():\n", - " with gr.Column(scale=1):\n", - " \n", - " gr.Markdown(\"### โš™๏ธ RAG Configuration\")\n", - " \n", - " # Model selection\n", - " model_dropdown = gr.Dropdown(\n", - " choices=[\"gpt-4o\", \"gpt-4o-mini\"],\n", - " value=\"gpt-4o-mini\",\n", - " 
label=\"Model\",\n", - " info=\"Choose the LLM model for responses\"\n", - " )\n", - " \n", - " # Temperature control\n", - " temperature_slider = gr.Slider(\n", - " minimum=0.0,\n", - " maximum=1.0,\n", - " step=0.1,\n", - " value=0.1,\n", - " label=\"Temperature\",\n", - " info=\"0.0 = deterministic, 1.0 = creative\"\n", - " )\n", - " \n", - " # Chunking parameters\n", - " chunk_size_input = gr.Number(\n", - " value=512,\n", - " label=\"Chunk Size\",\n", - " info=\"Size of document chunks (256-1024)\",\n", - " minimum=256,\n", - " maximum=1024,\n", - " step=64\n", - " )\n", - " \n", - " chunk_overlap_input = gr.Number(\n", - " value=50,\n", - " label=\"Chunk Overlap\",\n", - " info=\"Overlap between chunks (10-200)\",\n", - " minimum=10,\n", - " maximum=200,\n", - " step=10\n", - " )\n", - " \n", - " # Retrieval parameters\n", - " similarity_topk_slider = gr.Slider(\n", - " minimum=1,\n", - " maximum=20,\n", - " step=1,\n", - " value=5,\n", - " label=\"Similarity Top-K\",\n", - " info=\"Number of documents to retrieve (1-20)\"\n", - " )\n", - " \n", - " # Postprocessor selection\n", - " postprocessor_checkbox = gr.CheckboxGroup(\n", - " choices=[\"SimilarityPostprocessor\"],\n", - " label=\"Node Postprocessors\",\n", - " info=\"Filter and refine retrieval results\"\n", - " )\n", - " \n", - " # Similarity filtering\n", - " similarity_cutoff_slider = gr.Slider(\n", - " minimum=0.0,\n", - " maximum=1.0,\n", - " step=0.05,\n", - " value=0.3,\n", - " label=\"Similarity Cutoff\",\n", - " info=\"Minimum relevance score (0.0-1.0)\"\n", - " )\n", - " \n", - " # Response synthesizer\n", - " synthesizer_dropdown = gr.Dropdown(\n", - " choices=[\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"],\n", - " value=\"Default\",\n", - " label=\"Response Synthesizer\",\n", - " info=\"How to combine retrieved information\"\n", - " )\n", - " \n", - " with gr.Column(scale=2):\n", - " gr.Markdown(\"### ๐Ÿ’ฌ Query Interface\")\n", - " \n", - " # Query input\n", - " 
query_input = gr.Textbox(\n", - " label=\"Ask a question\",\n", - " placeholder=\"e.g., What are the main topics in the documents?\",\n", - " lines=3\n", - " )\n", - " \n", - " # Submit button\n", - " submit_btn = gr.Button(\"๐Ÿ” Ask Question\", variant=\"primary\")\n", - " \n", - " # Response output\n", - " response_output = gr.Textbox(\n", - " label=\"AI Response\",\n", - " placeholder=\"Your response will appear here...\",\n", - " interactive=False,\n", - " lines=12\n", - " )\n", - " \n", - " # Configuration display\n", - " config_display = gr.Textbox(\n", - " label=\"Configuration Used\",\n", - " placeholder=\"Configuration details will appear here after query...\",\n", - " interactive=False,\n", - " lines=8\n", - " )\n", - " \n", - " # Connect functions to components\n", - " init_btn.click(initialize_db, outputs=[status_output])\n", - " \n", - " submit_btn.click(\n", - " handle_advanced_query,\n", - " inputs=[\n", - " query_input, model_dropdown, temperature_slider,\n", - " chunk_size_input, chunk_overlap_input, similarity_topk_slider,\n", - " postprocessor_checkbox, similarity_cutoff_slider, synthesizer_dropdown\n", - " ],\n", - " outputs=[response_output, config_display]\n", - " )\n", - " \n", - " return interface\n", - "\n", - "# Create the interface\n", - "advanced_interface = create_advanced_rag_interface()\n", - "print(\"โœ… Advanced RAG interface created successfully!\")\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿš€ Part 4: Launch Your Advanced Application\n", - "\n", - "Launch your advanced Gradio application and test all the configuration options!\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐ŸŽ‰ Launching your Advanced RAG Assistant...\n", - "๐Ÿ”— Your application will open in a new browser tab!\n", - "\n", - "โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is 
set!\n", - "\n", - "๐Ÿ“‹ Testing Instructions:\n", - "1. Click 'Initialize Vector Database' button first\n", - "2. Wait for success message\n", - "3. Configure your RAG parameters:\n", - " - Choose model (gpt-4o, gpt-4o-mini)\n", - " - Adjust temperature (0.0 = deterministic, 1.0 = creative)\n", - " - Set chunk size and overlap\n", - " - Choose similarity top-k\n", - " - Select postprocessors and synthesizer\n", - "4. Enter a question and click 'Ask Question'\n", - "5. Review both the response and configuration used\n", - "\n", - "๐Ÿงช Experiments to try:\n", - "- Compare different models with the same question\n", - "- Test temperature effects (0.1 vs 0.9)\n", - "- Try different chunk sizes (256 vs 1024)\n", - "- Compare synthesizers (TreeSummarize vs Refine)\n", - "- Adjust similarity cutoff to filter results\n", - "* Running on local URL: http://127.0.0.1:7861\n", - "* To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\ai_agents.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\in_the_end.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n", - "Failed to load file c:\\Users\\gengi\\OneDrive\\Desktop\\ai-accelerator-C2\\Day_6\\session_2\\assignments\\..\\data\\audio\\rags.mp3 with error: [WinError 2] The system cannot find the file specified. Skipping...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\python\\Lib\\site-packages\\whisper\\transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n", - "Parsing nodes: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 39/39 [00:00<00:00, 52.56it/s]\n", - "Generating embeddings: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 92/92 [00:21<00:00, 4.32it/s]\n", - "2025-11-02 15:52:03,184 - INFO - Load pretrained SentenceTransformer: BAAI/bge-small-en-v1.5\n", - "2025-11-02 15:52:07,000 - INFO - 1 prompt is loaded, with the key: query\n", - "2025-11-02 15:52:07,046 - INFO - query_type :, vector\n", - "2025-11-02 15:52:09,764 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 15:52:28,825 - INFO - Load pretrained SentenceTransformer: BAAI/bge-small-en-v1.5\n", - "2025-11-02 15:52:32,394 - INFO - 1 prompt is loaded, with the key: query\n", - "2025-11-02 15:52:32,478 - INFO - 
query_type :, vector\n", - "2025-11-02 15:52:34,377 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 15:52:36,155 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 15:52:46,064 - INFO - Load pretrained SentenceTransformer: BAAI/bge-small-en-v1.5\n", - "2025-11-02 15:52:51,913 - INFO - 1 prompt is loaded, with the key: query\n", - "2025-11-02 15:52:51,968 - INFO - query_type :, vector\n", - "2025-11-02 15:52:53,713 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-11-02 15:52:55,579 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - } - ], - "source": [ - "print(\"๐ŸŽ‰ Launching your Advanced RAG Assistant...\")\n", - "print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n", - "print(\"\")\n", - "print(\"โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\")\n", - "print(\"\")\n", - "print(\"๐Ÿ“‹ Testing Instructions:\")\n", - "print(\"1. Click 'Initialize Vector Database' button first\")\n", - "print(\"2. Wait for success message\")\n", - "print(\"3. Configure your RAG parameters:\")\n", - "print(\" - Choose model (gpt-4o, gpt-4o-mini)\")\n", - "print(\" - Adjust temperature (0.0 = deterministic, 1.0 = creative)\")\n", - "print(\" - Set chunk size and overlap\")\n", - "print(\" - Choose similarity top-k\")\n", - "print(\" - Select postprocessors and synthesizer\")\n", - "print(\"4. Enter a question and click 'Ask Question'\")\n", - "print(\"5. 
Review both the response and configuration used\")\n", - "print(\"\")\n", - "print(\"๐Ÿงช Experiments to try:\")\n", - "print(\"- Compare different models with the same question\")\n", - "print(\"- Test temperature effects (0.1 vs 0.9)\")\n", - "print(\"- Try different chunk sizes (256 vs 1024)\")\n", - "print(\"- Compare synthesizers (TreeSummarize vs Refine)\")\n", - "print(\"- Adjust similarity cutoff to filter results\")\n", - "\n", - "# Your code here:\n", - "advanced_interface.launch()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿ’ก Understanding the Configuration Options\n", - "\n", - "### Model Selection\n", - "- **gpt-4o**: Latest and most capable model, best quality responses\n", - "- **gpt-4o-mini**: Faster and cheaper while maintaining good quality\n", - "\n", - "### Temperature (0.0 - 1.0)\n", - "- **0.0-0.3**: Deterministic, factual responses\n", - "- **0.4-0.7**: Balanced creativity and accuracy\n", - "- **0.8-1.0**: More creative and varied responses\n", - "\n", - "### Chunk Size & Overlap\n", - "- **Chunk Size**: How much text to process at once (256-1024 typical)\n", - "- **Chunk Overlap**: Overlap between chunks to maintain context (10-100 typical)\n", - "\n", - "### Similarity Top-K (1-20)\n", - "- **Lower values (3-5)**: More focused, faster responses\n", - "- **Higher values (8-15)**: More comprehensive, detailed responses\n", - "\n", - "### Node Postprocessors\n", - "- **SimilarityPostprocessor**: Filters out low-relevance documents\n", - "\n", - "### Similarity Cutoff (0.0-1.0)\n", - "- **0.1-0.3**: More permissive, includes potentially relevant docs\n", - "- **0.5-0.8**: More strict, only highly relevant docs\n", - "\n", - "### Response Synthesizers\n", - "- **TreeSummarize**: Hierarchical summarization, good for complex topics\n", - "- **Refine**: Iterative refinement, builds detailed responses\n", - "- **CompactAndRefine**: Efficient version of Refine\n", - "- **Default**: Standard synthesis approach\n" 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## โœ… Assignment Completion Checklist\n", - "\n", - "Before submitting, ensure you have:\n", - "\n", - "- [ ] Set up your OPENROUTER_API_KEY environment variable\n", - "- [ ] Imported all necessary libraries including advanced RAG components\n", - "- [ ] Created AdvancedRAGBackend class with configurable parameters\n", - "- [ ] Implemented all required methods:\n", - " - [ ] `update_settings()` - Updates LLM and chunking parameters\n", - " - [ ] `initialize_database()` - Sets up vector database\n", - " - [ ] `get_postprocessor()` - Returns selected postprocessor\n", - " - [ ] `get_synthesizer()` - Returns selected synthesizer\n", - " - [ ] `advanced_query()` - Handles queries with all configuration options\n", - "- [ ] Created advanced Gradio interface with all required components:\n", - " - [ ] Initialize database button\n", - " - [ ] Model selection dropdown (gpt-4o, gpt-4o-mini)\n", - " - [ ] Temperature slider (0 to 1, step 0.1)\n", - " - [ ] Chunk size input (default 512)\n", - " - [ ] Chunk overlap input (default 50)\n", - " - [ ] Similarity top-k slider (1 to 20, default 5)\n", - " - [ ] Node postprocessor multiselect\n", - " - [ ] Similarity cutoff slider (0.0 to 1.0, step 0.1, default 0.3)\n", - " - [ ] Response synthesizer dropdown\n", - " - [ ] Query input and submit button\n", - " - [ ] Response output\n", - " - [ ] Configuration display\n", - "- [ ] Connected all components to backend functions\n", - "- [ ] Successfully launched the application\n", - "- [ ] Tested different parameter combinations\n", - "- [ ] Verified all configuration options work correctly\n", - "\n", - "## ๐ŸŽŠ Congratulations!\n", - "\n", - "You've successfully built a professional, production-ready RAG application! 
You now have:\n", - "\n", - "- **Advanced Parameter Control**: Full control over all RAG system parameters\n", - "- **Professional UI**: Clean, organized interface with proper layout\n", - "- **Real-time Configuration**: Ability to experiment with different settings\n", - "- **Production Patterns**: Understanding of how to build scalable AI applications\n", - "\n", - "## ๐Ÿš€ Next Steps & Extensions\n", - "\n", - "**Potential Enhancements:**\n", - "1. **Authentication**: Add user login and session management\n", - "2. **Document Upload**: Allow users to upload their own documents\n", - "3. **Chat History**: Implement conversation memory\n", - "4. **Performance Monitoring**: Add response time and quality metrics\n", - "5. **A/B Testing**: Compare different configurations side-by-side\n", - "6. **Export Features**: Download responses and configurations\n", - "7. **Advanced Visualizations**: Show document similarity scores and retrieval paths\n", - "\n", - "**Deployment Options:**\n", - "- **Local**: Run on your machine for development\n", - "- **Gradio Cloud**: Deploy with `interface.launch(share=True)`\n", - "- **Hugging Face Spaces**: Deploy to Hugging Face for public access\n", - "- **Docker**: Containerize for scalable deployment\n", - "- **Cloud Platforms**: Deploy to AWS, GCP, or Azure\n", - "\n", - "You're now ready to build sophisticated AI-powered applications!\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Girish_Basavaraj_Hiremath/session_2/data/AI_Agent_Frameworks.pdf b/Girish_Basavaraj_Hiremath/session_2/data/AI_Agent_Frameworks.pdf deleted file mode 100644 index 
36f4cb7..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/AI_Agent_Frameworks.pdf and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/Emerging_Agent_Architectures.pdf b/Girish_Basavaraj_Hiremath/session_2/data/Emerging_Agent_Architectures.pdf deleted file mode 100644 index b16b0b6..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/Emerging_Agent_Architectures.pdf and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/audio/ai_agents.mp3 b/Girish_Basavaraj_Hiremath/session_2/data/audio/ai_agents.mp3 deleted file mode 100644 index 8d13e31..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/audio/ai_agents.mp3 and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/audio/in_the_end.mp3 b/Girish_Basavaraj_Hiremath/session_2/data/audio/in_the_end.mp3 deleted file mode 100644 index f708431..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/audio/in_the_end.mp3 and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/audio/rags.mp3 b/Girish_Basavaraj_Hiremath/session_2/data/audio/rags.mp3 deleted file mode 100644 index 1bed69c..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/audio/rags.mp3 and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/cooking/images/recipe_popularity.png b/Girish_Basavaraj_Hiremath/session_2/data/cooking/images/recipe_popularity.png deleted file mode 100644 index ec52e3a..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/cooking/images/recipe_popularity.png and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/cooking/italian_recipes.csv b/Girish_Basavaraj_Hiremath/session_2/data/cooking/italian_recipes.csv deleted file mode 100644 index 2f41d43..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/cooking/italian_recipes.csv +++ /dev/null @@ -1,11 +0,0 @@ 
-recipe_name,cuisine_type,prep_time_minutes,difficulty,main_ingredient,calories_per_serving -Spaghetti Carbonara,Italian,20,Easy,Pasta,450 -Margherita Pizza,Italian,45,Medium,Tomato,320 -Risotto Milanese,Italian,35,Hard,Rice,380 -Tiramisu,Italian,30,Medium,Mascarpone,295 -Bruschetta,Italian,10,Easy,Tomato,180 -Osso Buco,Italian,120,Hard,Veal,520 -Panna Cotta,Italian,15,Easy,Cream,240 -Lasagna Bolognese,Italian,90,Hard,Pasta,485 -Minestrone Soup,Italian,40,Medium,Vegetables,165 -Gelato,Italian,25,Medium,Milk,210 diff --git a/Girish_Basavaraj_Hiremath/session_2/data/cooking/recipe_instructions.md b/Girish_Basavaraj_Hiremath/session_2/data/cooking/recipe_instructions.md deleted file mode 100644 index d45212f..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/cooking/recipe_instructions.md +++ /dev/null @@ -1,54 +0,0 @@ -# ๐Ÿ Classic Spaghetti Carbonara Recipe - -## Ingredients -- 400g spaghetti pasta -- 4 large egg yolks -- 100g pecorino romano cheese (grated) -- 150g guanciale or pancetta (diced) -- Black pepper (freshly ground) -- Salt for pasta water - -## Instructions - -### Step 1: Prepare the Sauce -1. In a large bowl, whisk together egg yolks and grated pecorino cheese -2. Add plenty of freshly ground black pepper -3. Mix until smooth and creamy (no lumps) - -### Step 2: Cook the Guanciale -1. Heat a large pan over medium heat -2. Add diced guanciale (no oil needed) -3. Cook until crispy and golden (about 5-7 minutes) -4. Reserve the rendered fat in the pan - -### Step 3: Cook the Pasta -1. Bring a large pot of salted water to boil -2. Add spaghetti and cook until al dente (about 10-12 minutes) -3. Reserve 1 cup of pasta water before draining - -### Step 4: Combine Everything -1. Add hot, drained pasta to the pan with guanciale -2. Remove from heat immediately -3. Quickly toss pasta with the rendered fat -4. Add egg mixture and toss vigorously -5. Add pasta water gradually until creamy consistency - -### Step 5: Serve -1. 
Serve immediately in warm bowls -2. Top with extra pecorino and black pepper -3. Enjoy while hot! - -## Chef's Tips -- **Temperature Control**: Never let eggs scramble - work off heat -- **Pasta Water**: The starchy water is crucial for emulsification -- **Cheese Quality**: Use real Pecorino Romano, not Parmesan -- **Timing**: Have everything ready - this dish waits for no one! - -## Nutritional Information (per serving) -- **Calories**: 450 -- **Protein**: 18g -- **Carbohydrates**: 52g -- **Fat**: 19g -- **Prep Time**: 15 minutes -- **Cook Time**: 20 minutes -- **Serves**: 4 people diff --git a/Girish_Basavaraj_Hiremath/session_2/data/csv/agent_evaluation_metrics.csv b/Girish_Basavaraj_Hiremath/session_2/data/csv/agent_evaluation_metrics.csv deleted file mode 100644 index c730064..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/csv/agent_evaluation_metrics.csv +++ /dev/null @@ -1,16 +0,0 @@ -test_case,task_type,agent_name,success_rate,avg_completion_time,error_count,user_satisfaction -web_scraping,data_collection,AutoGPT,0.85,45.2,3,4.2 -code_generation,development,GPT-Engineer,0.92,120.5,1,4.7 -research_task,information_gathering,CrewAI-Researcher,0.88,180.3,2,4.5 -math_problem,reasoning,ReAct-GPT4,0.91,25.8,1,4.6 -customer_support,conversation,Claude-Agent,0.89,15.2,2,4.4 -data_analysis,analysis,PaLM-Agent,0.86,95.7,4,4.3 -task_planning,planning,SuperAGI,0.84,75.4,3,4.1 -document_summarization,text_processing,Llama-Agent,0.87,32.1,2,4.3 -image_description,multimodal,OpenAI-Assistant,0.83,18.6,5,4.0 -email_automation,automation,AgentGPT,0.90,12.3,1,4.5 -database_query,data_retrieval,LangChain-Agent,0.94,8.7,0,4.8 -creative_writing,generation,Mixtral-Agent,0.82,156.9,3,4.2 -translation,language,Claude-Agent,0.91,22.4,1,4.6 -scheduling,coordination,MetaGPT,0.88,67.2,2,4.4 -debugging,code_analysis,GPT-Engineer,0.89,89.3,3,4.3 diff --git a/Girish_Basavaraj_Hiremath/session_2/data/csv/agent_performance_benchmark.csv 
b/Girish_Basavaraj_Hiremath/session_2/data/csv/agent_performance_benchmark.csv deleted file mode 100644 index 8ed571e..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/csv/agent_performance_benchmark.csv +++ /dev/null @@ -1,16 +0,0 @@ -agent_name,agent_type,accuracy_score,speed_ms,memory_usage_mb,cost_per_query,framework -ReAct-GPT4,reasoning,0.87,1200,45.2,0.02,langchain -AutoGPT,autonomous,0.78,2100,78.5,0.035,autogpt -LangChain-Agent,tool_using,0.82,950,32.1,0.015,langchain -CrewAI-Researcher,collaborative,0.85,1800,56.3,0.025,crewai -BabyAGI,goal_oriented,0.76,2400,92.1,0.04,babyagi -GPT-Engineer,code_generation,0.88,1600,41.7,0.028,gpt_engineer -MetaGPT,multi_agent,0.84,1950,67.8,0.032,metagpt -Camel-Agent,conversational,0.79,1100,38.4,0.018,camel -AgentGPT,web_automation,0.81,1750,52.6,0.022,agentgpt -SuperAGI,planning,0.83,1450,49.3,0.026,superagi -OpenAI-Assistant,general,0.86,800,28.9,0.012,openai -Claude-Agent,analytical,0.85,1050,35.7,0.02,anthropic -PaLM-Agent,reasoning,0.84,1300,44.1,0.024,google -Llama-Agent,open_source,0.80,1400,50.2,0.008,meta -Mixtral-Agent,mixture_expert,0.82,1600,58.4,0.015,mistral diff --git a/Girish_Basavaraj_Hiremath/session_2/data/finance/images/stock_performance.png b/Girish_Basavaraj_Hiremath/session_2/data/finance/images/stock_performance.png deleted file mode 100644 index 467c0f7..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/finance/images/stock_performance.png and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/finance/investment_portfolio.csv b/Girish_Basavaraj_Hiremath/session_2/data/finance/investment_portfolio.csv deleted file mode 100644 index 598045f..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/finance/investment_portfolio.csv +++ /dev/null @@ -1,11 +0,0 @@ -asset_type,ticker_symbol,company_name,investment_amount_usd,current_value_usd,percentage_return,risk_level -Stock,AAPL,Apple Inc,10000,12500,25.0,Medium -Stock,GOOGL,Alphabet Inc,8000,9200,15.0,Medium 
-Stock,TSLA,Tesla Inc,5000,4200,-16.0,High -Bond,US10Y,US Treasury 10-Year,15000,14800,-1.3,Low -ETF,SPY,SPDR S&P 500 ETF,20000,22400,12.0,Medium -Crypto,BTC,Bitcoin,3000,4800,60.0,Very High -Stock,MSFT,Microsoft Corp,12000,14400,20.0,Medium -REIT,VNQ,Vanguard Real Estate ETF,7000,7350,5.0,Medium -Gold,GLD,SPDR Gold Trust,4000,3900,-2.5,Low -Stock,NVDA,NVIDIA Corp,6000,9000,50.0,High diff --git a/Girish_Basavaraj_Hiremath/session_2/data/finance/market_analysis.md b/Girish_Basavaraj_Hiremath/session_2/data/finance/market_analysis.md deleted file mode 100644 index db345f3..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/finance/market_analysis.md +++ /dev/null @@ -1,91 +0,0 @@ -# ๐Ÿ“ˆ Q3 2024 Market Analysis Report - -## Executive Summary - -The third quarter of 2024 showed mixed performance across different asset classes, with technology stocks leading gains while bonds faced continued pressure from interest rate concerns. - -## Key Market Movements - -### Technology Sector ๐Ÿ’ป -- **NVIDIA (NVDA)**: +47% driven by AI chip demand -- **Apple (AAPL)**: +12% on iPhone 16 launch success -- **Microsoft (MSFT)**: +8% supported by Azure cloud growth - -### Energy Sector โšก -- Oil prices fluctuated between $75-85 per barrel -- Renewable energy stocks gained 15% on average -- Natural gas remained volatile due to geopolitical tensions - -### Real Estate ๐Ÿ  -- Commercial real estate down 8% in major cities -- Residential market showing signs of stabilization -- REITs provided steady 4-6% returns - -## Economic Indicators - -### Interest Rates -- Federal Reserve held rates steady at 5.25-5.50% -- 10-year Treasury yield averaged 4.2% -- Mortgage rates declined slightly to 6.8% - -### Inflation Metrics -- Core CPI at 3.1% year-over-year -- Energy inflation moderated to 2.8% -- Housing costs remain elevated at 5.2% - -### Employment -- Unemployment rate: 3.8% -- Job openings: 9.2 million -- Labor force participation: 63.1% - -## Investment Recommendations - -### 
Conservative Investors ๐Ÿ›ก๏ธ -1. **Treasury Bills**: Safe haven with 5%+ yields -2. **High-grade Corporate Bonds**: 6-7% yields with credit quality -3. **Dividend Aristocrats**: Stable income with modest growth - -### Moderate Risk Investors โš–๏ธ -1. **S&P 500 Index Funds**: Diversified exposure to US markets -2. **International ETFs**: Geographic diversification -3. **Bond Ladders**: Interest rate risk management - -### Aggressive Investors ๐Ÿš€ -1. **Growth Stocks**: Technology and healthcare leaders -2. **Emerging Markets**: Higher growth potential -3. **Cryptocurrency**: Small allocation (2-5% max) - -## Sector Outlook Q4 2024 - -### Bullish Sectors -- **Healthcare**: Aging demographics driving demand -- **Technology**: AI revolution continuing -- **Utilities**: Defensive play with steady dividends - -### Bearish Sectors -- **Commercial Real Estate**: Work-from-home impact persists -- **Traditional Retail**: E-commerce competition intensifies -- **Regional Banks**: Credit quality concerns - -## Risk Factors - -### Geopolitical Risks -- US-China trade tensions -- Middle East conflicts affecting energy -- European economic slowdown - -### Economic Risks -- Persistent inflation pressures -- Consumer spending slowdown -- Credit market stress - -### Market Risks -- High valuations in tech sector -- Interest rate volatility -- Currency fluctuations - -## Conclusion - -While markets face headwinds from macro uncertainties, selective opportunities exist across asset classes. Investors should maintain diversified portfolios and consider their risk tolerance when making allocation decisions. - -**Disclaimer**: This analysis is for educational purposes only and not investment advice. Consult with financial professionals before making investment decisions. 
diff --git a/Girish_Basavaraj_Hiremath/session_2/data/health/fitness_tracker.html b/Girish_Basavaraj_Hiremath/session_2/data/health/fitness_tracker.html deleted file mode 100644 index 8ededb8..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/health/fitness_tracker.html +++ /dev/null @@ -1,90 +0,0 @@ - - - - - - Weekly Fitness Tracker - - - -

๐Ÿƒโ€โ™‚๏ธ Weekly Fitness Tracker

- -
-

Monday - Upper Body Strength

-
- Workout: Bench Press, Pull-ups, Shoulder Press
- Duration: 45 minutes
- Intensity: High -
-
- Calories Burned: 320
- Heart Rate Avg: 145 bpm
- Steps: 8,500 -
-
- -
-

Tuesday - Cardio

-
- Workout: Running, Cycling
- Duration: 60 minutes
- Intensity: Medium -
-
- Calories Burned: 450
- Heart Rate Avg: 160 bpm
- Steps: 12,000 -
-
- -
-

Wednesday - Lower Body Strength

-
- Workout: Squats, Deadlifts, Leg Press
- Duration: 50 minutes
- Intensity: High -
-
- Calories Burned: 380
- Heart Rate Avg: 150 bpm
- Steps: 7,800 -
-
- -
-

Thursday - Rest Day

-
- Activity: Light stretching, walking
- Duration: 20 minutes
- Intensity: Low -
-
- Calories Burned: 120
- Heart Rate Avg: 95 bpm
- Steps: 6,200 -
-
- -

๐Ÿ“Š Weekly Summary

-
    -
  • Total Workouts: 4
  • -
  • Total Calories Burned: 1,270
  • -
  • Average Heart Rate: 137 bpm
  • -
  • Total Steps: 34,500
  • -
  • Weight Progress: -2.1 lbs
  • -
- -

๐ŸŽฏ Next Week Goals

-
    -
  • Add 15 minutes to cardio sessions
  • -
  • Increase weight on compound exercises by 5%
  • -
  • Aim for 10,000+ steps daily
  • -
  • Try yoga for flexibility
  • -
- - diff --git a/Girish_Basavaraj_Hiremath/session_2/data/health/images/fitness_progress.png b/Girish_Basavaraj_Hiremath/session_2/data/health/images/fitness_progress.png deleted file mode 100644 index 44ce2c1..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/health/images/fitness_progress.png and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/html/agent_tutorial.html b/Girish_Basavaraj_Hiremath/session_2/data/html/agent_tutorial.html deleted file mode 100644 index 63b2f3d..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/html/agent_tutorial.html +++ /dev/null @@ -1,93 +0,0 @@ - - - - - - Building Your First AI Agent - Tutorial - - - -

Building Your First AI Agent: Step-by-Step Tutorial

- -
- Goal: Create a simple AI agent that can answer questions about documents using RAG (Retrieval-Augmented Generation). -
- -
-

Step 1: Environment Setup

-

First, install the required packages:

-
- -pip install openai langchain llama-index - -
-
- -
-

Step 2: Create Document Loader

-

Load your documents using LlamaIndex:

-
- -from llama_index.core import SimpleDirectoryReader
-documents = SimpleDirectoryReader("./data").load_data() -
-
-
- -
-

Step 3: Build Vector Index

-

Create a searchable index from your documents:

-
- -from llama_index.core import VectorStoreIndex
-index = VectorStoreIndex.from_documents(documents) -
-
-
- -
-

Step 4: Create Query Engine

-

Set up the query interface:

-
- -query_engine = index.as_query_engine()
-response = query_engine.query("What are the main types of AI agents?") -
-
-
- -

Key Concepts

-
    -
  • RAG (Retrieval-Augmented Generation): Combines information retrieval with text generation
  • -
  • Vector Index: Enables semantic search through document embeddings
  • -
  • Query Engine: Processes user questions and generates responses
  • -
  • Document Loader: Handles various file formats (PDF, TXT, CSV, etc.)
  • -
- -

Best Practices

-
    -
  1. Use clear, specific questions for better results
  2. -
  3. Chunk large documents appropriately
  4. -
  5. Choose the right embedding model for your domain
  6. -
  7. Monitor and evaluate agent performance regularly
  8. -
- -

Next Steps

-

Once you have a basic agent working, consider:

-
    -
  • Adding memory capabilities
  • -
  • Integrating external tools and APIs
  • -
  • Implementing multi-agent collaboration
  • -
  • Fine-tuning for specific domains
  • -
- -
- Tip: Start simple and iterate. The most effective agents often have focused, well-defined capabilities rather than trying to do everything. -
- - diff --git a/Girish_Basavaraj_Hiremath/session_2/data/images/agent_performance_comparison.png b/Girish_Basavaraj_Hiremath/session_2/data/images/agent_performance_comparison.png deleted file mode 100644 index ed7cbfb..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/images/agent_performance_comparison.png and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/images/agent_types_comparison.png b/Girish_Basavaraj_Hiremath/session_2/data/images/agent_types_comparison.png deleted file mode 100644 index 83e3de3..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/images/agent_types_comparison.png and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/data/markdown/agent_framework_comparison.md b/Girish_Basavaraj_Hiremath/session_2/data/markdown/agent_framework_comparison.md deleted file mode 100644 index 5e65825..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/markdown/agent_framework_comparison.md +++ /dev/null @@ -1,83 +0,0 @@ -# AI Agent Framework Comparison - -## Overview - -This document provides a comprehensive comparison of popular AI agent frameworks available in 2024. - -## Framework Categories - -### 1. Autonomous Agents -- **AutoGPT**: Pioneer in autonomous task execution -- **BabyAGI**: Simplified approach to autonomous AI -- **AgentGPT**: Web-based autonomous agent platform - -### 2. Tool-Using Agents -- **LangChain**: Comprehensive framework for LLM applications -- **LlamaIndex**: Specialized in document understanding and RAG -- **Semantic Kernel**: Microsoft's approach to AI orchestration - -### 3. 
Multi-Agent Systems -- **CrewAI**: Collaborative agent teams -- **MetaGPT**: Multi-agent software development -- **AutoGen**: Microsoft's multi-agent conversation framework - -## Key Differences - -| Framework | Complexity | Learning Curve | Best Use Case | -|-----------|------------|----------------|---------------| -| LangChain | Medium | Moderate | General LLM apps | -| AutoGPT | High | Steep | Autonomous tasks | -| CrewAI | Medium | Easy | Team collaboration | -| LlamaIndex | Low | Easy | Document Q&A | - -## Code Examples - -### LangChain Agent Setup -```python -from langchain.agents import initialize_agent -from langchain.tools import Tool - -agent = initialize_agent( - tools=[search_tool, calculator_tool], - llm=llm, - agent_type="zero-shot-react-description" -) -``` - -### CrewAI Team Setup -```python -from crewai import Agent, Task, Crew - -researcher = Agent( - role='Researcher', - goal='Find relevant information', - backstory='Expert in information gathering' -) - -crew = Crew( - agents=[researcher], - tasks=[research_task], - verbose=True -) -``` - -## Performance Considerations - -- **Latency**: Single agents typically faster than multi-agent systems -- **Accuracy**: Multi-agent systems often more accurate for complex tasks -- **Cost**: More agents = higher API costs -- **Reliability**: Simpler frameworks generally more stable - -## Choosing the Right Framework - -1. **For beginners**: Start with LlamaIndex or simple LangChain -2. **For complex tasks**: Consider AutoGPT or multi-agent systems -3. **For production**: LangChain or Semantic Kernel offer better stability -4. 
**For research**: Experiment with cutting-edge frameworks like MetaGPT - -## Resources - -- [LangChain Documentation](https://docs.langchain.com) -- [LlamaIndex Guide](https://docs.llamaindex.ai) -- [CrewAI Examples](https://github.com/joaomdmoura/crewAI) -- [Agent Development Best Practices](https://example.com/best-practices) diff --git a/Girish_Basavaraj_Hiremath/session_2/data/travel/city_guides.md b/Girish_Basavaraj_Hiremath/session_2/data/travel/city_guides.md deleted file mode 100644 index 709847c..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/data/travel/city_guides.md +++ /dev/null @@ -1,44 +0,0 @@ -# Ultimate City Travel Guide - -## Paris, France ๐Ÿ‡ซ๐Ÿ‡ท - -**Best Time to Visit:** April-June, September-October -**Must-See Attractions:** -- Eiffel Tower - Iconic iron lattice tower -- Louvre Museum - World's largest art museum -- Notre-Dame Cathedral - Gothic masterpiece -- Champs-ร‰lysรฉes - Famous shopping avenue - -**Local Cuisine:** Croissants, Escargot, Coq au Vin, Macarons -**Transportation:** Metro system, Vรฉlib bike sharing -**Budget:** โ‚ฌ100-150 per day for mid-range travel - ---- - -## Tokyo, Japan ๐Ÿ‡ฏ๐Ÿ‡ต - -**Best Time to Visit:** March-May (cherry blossoms), September-November -**Must-See Attractions:** -- Senso-ji Temple - Ancient Buddhist temple -- Shibuya Crossing - World's busiest pedestrian crossing -- Tokyo Skytree - Tallest tower in Japan -- Tsukiji Fish Market - Famous tuna auctions - -**Local Cuisine:** Sushi, Ramen, Tempura, Yakitori -**Transportation:** JR Pass, extensive train network -**Budget:** ยฅ12,000-18,000 per day for mid-range travel - ---- - -## New York City, USA ๐Ÿ‡บ๐Ÿ‡ธ - -**Best Time to Visit:** April-June, September-November -**Must-See Attractions:** -- Statue of Liberty - Symbol of freedom -- Central Park - Urban oasis -- Times Square - Bright lights and Broadway -- Brooklyn Bridge - Historic suspension bridge - -**Local Cuisine:** Pizza, Bagels, Cheesecake, Hot Dogs -**Transportation:** Subway system, yellow 
taxis, Uber -**Budget:** $150-250 per day for mid-range travel diff --git a/Girish_Basavaraj_Hiremath/session_2/data/travel/images/city_temperatures.png b/Girish_Basavaraj_Hiremath/session_2/data/travel/images/city_temperatures.png deleted file mode 100644 index c474618..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/data/travel/images/city_temperatures.png and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/01_academic_papers_rag.ipynb b/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/01_academic_papers_rag.ipynb deleted file mode 100644 index 2350d5d..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/01_academic_papers_rag.ipynb +++ /dev/null @@ -1,3270 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Academic Papers RAG System Tutorial\n", - "\n", - "This notebook demonstrates how to build a complete RAG (Retrieval-Augmented Generation) system for academic research papers using LlamaIndex. We'll build it step by step with independent functions that you can run and understand individually.\n", - "\n", - "## What is RAG?\n", - "\n", - "RAG combines the power of:\n", - "- **Retrieval**: Finding relevant documents from a knowledge base\n", - "- **Augmented Generation**: Using retrieved context to generate informed responses\n", - "\n", - "## System Components\n", - "\n", - "Our RAG system will include:\n", - "1. **PDF Processing**: Extract text from academic papers\n", - "2. **Document Chunking**: Split documents into searchable segments\n", - "3. **Vector Embeddings**: Convert text to numerical representations\n", - "4. **Vector Storage**: Store embeddings in LanceDB\n", - "5. **Semantic Search**: Find relevant content for queries\n", - "6. 
**Query Engine**: Generate responses using retrieved context\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿ—๏ธ Storage Architecture: Why StorageContext Matters\n", - "\n", - "This notebook uses LlamaIndex's **StorageContext** approach, which provides significant advantages over simpler vector-only storage methods. Understanding this architecture is crucial for building production-ready RAG systems.\n", - "\n", - "### ๐Ÿ“Š StorageContext vs. Simple Vector Storage\n", - "\n", - "| Component | StorageContext (This Notebook) | Simple Vector Store | Benefits |\n", - "|-----------|-------------------------------|-------------------|----------|\n", - "| **Vector Store** | โœ… LanceDB embeddings | โœ… LanceDB embeddings | Fast similarity search |\n", - "| **Document Store** | โœ… Original documents preserved | โŒ Lost after processing | Full document access |\n", - "| **Index Store** | โœ… Index metadata & structure | โŒ Must rebuild index | Exact reconstruction |\n", - "| **Graph Store** | โœ… Document relationships | โŒ No relationship data | Rich context understanding |\n", - "\n", - "### ๐Ÿ”„ Persistence & Recovery Capabilities\n", - "\n", - "**With StorageContext (Our Approach):**\n", - "```python\n", - "# Save complete system state\n", - "index.storage_context.persist(persist_dir=\"storage/papers_index\")\n", - "\n", - "# Perfect restoration - identical behavior\n", - "storage_context = StorageContext.from_defaults(persist_dir=\"storage/papers_index\")\n", - "index = load_index_from_storage(storage_context)\n", - "# ๐ŸŽฏ Exact same results every time!\n", - "```\n", - "\n", - "**Simple Vector Store Only:**\n", - "```python\n", - "# Only vectors saved\n", - "vector_store = LanceDBVectorStore(uri=\"./vectors\")\n", - "\n", - "# Must recreate everything from scratch\n", - "index = VectorStoreIndex.from_vector_store(vector_store)\n", - "# โš ๏ธ May have different behavior, lost metadata\n", - "```\n", - "\n", - "### ๐Ÿ’ก Key 
Advantages of StorageContext\n", - "\n", - "1. **๐Ÿ”„ Perfect Reproducibility**: Identical results across sessions - critical for research and development\n", - "2. **๐Ÿ“ฆ Complete State Management**: All components preserved, not just vectors\n", - "3. **โšก Fast Startup**: No reprocessing needed - load existing index instantly\n", - "4. **๐Ÿ” Rich Metadata**: Document relationships, source tracking, and complex queries\n", - "5. **๐Ÿ› ๏ธ Development Friendly**: Iterate without rebuilding entire system\n", - "6. **๐ŸŽฏ Enterprise Ready**: Robust persistence for production deployments\n", - "\n", - "### ๐Ÿ“ˆ Storage Footprint Example\n", - "\n", - "For 1000 academic papers (~500MB original PDFs):\n", - "\n", - "**StorageContext Storage:**\n", - "```\n", - "storage/papers_index/\n", - "โ”œโ”€โ”€ docstore.json # 50MB - Original documents\n", - "โ”œโ”€โ”€ index_store.json # 5MB - Index metadata \n", - "โ”œโ”€โ”€ graph_store.json # 2MB - Relationships\n", - "โ””โ”€โ”€ LanceDB vector files # 200MB - Embeddings\n", - "Total: ~260MB\n", - "```\n", - "\n", - "**Benefits**: Complete system restoration, full metadata, relationships preserved\n", - "\n", - "**Simple Vector Storage:**\n", - "```\n", - "lancedb_data/\n", - "โ””โ”€โ”€ vectors.lance # 200MB - Embeddings only\n", - "Total: ~200MB\n", - "```\n", - "\n", - "**Limitations**: Must rebuild index, lost metadata, no relationships\n", - "\n", - "### ๐ŸŽฏ When to Use StorageContext\n", - "\n", - "โœ… **Research & Development** - Need reproducible experiments \n", - "โœ… **Complex Documents** - Rich metadata and relationships matter \n", - "โœ… **Production Systems** - Robust persistence and recovery required \n", - "โœ… **Academic Work** - Full traceability and citation tracking \n", - "โœ… **Multi-user Systems** - Consistent experience across users \n", - "\n", - "### ๐Ÿš€ Performance Impact\n", - "\n", - "- **Initial Build**: ~20% slower (stores additional metadata)\n", - "- **Subsequent Loads**: 10x faster (no 
reprocessing needed)\n", - "- **Query Performance**: Identical to simple vector approach\n", - "- **Storage Space**: ~30% more storage for complete persistence\n", - "\n", - "This tutorial demonstrates the StorageContext approach because it provides the most robust and feature-complete RAG implementation suitable for real-world applications.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. Environment Setup and Configuration\n", - "\n", - "First, let's set up our environment and load necessary configurations. We'll use OpenRouter for LLM access and local embeddings (no API keys needed for embeddings).\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install -r \"../requirements.txt\"" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Environment variables loaded successfully\n", - "Environment setup complete!\n" - ] - } - ], - "source": [ - "import os\n", - "import time\n", - "from pathlib import Path\n", - "from typing import Dict, List, Optional, Tuple\n", - "\n", - "from dotenv import load_dotenv\n", - "\n", - "def setup_environment():\n", - " \"\"\"\n", - " Setup environment variables and basic configuration.\n", - " \n", - " Returns:\n", - " bool: Success status\n", - " \"\"\"\n", - " # Load environment variables from .env file\n", - " load_dotenv()\n", - " \n", - " # Disable tokenizer warning\n", - " os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n", - " \n", - " # Check for required API key\n", - " api_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " if not api_key:\n", - " print(\"โš ๏ธ OPENROUTER_API_KEY not found in environment variables\")\n", - " print(\"Please add your OpenRouter API key to a .env file\")\n", - " return False\n", - " \n", - " print(\"โœ“ Environment variables loaded successfully\")\n", - " return True\n", - 
"\n", - "# Run the setup\n", - "success = setup_environment()\n", - "if success:\n", - " print(\"Environment setup complete!\")\n", - "else:\n", - " print(\"Environment setup failed!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Configuration Management\n", - "\n", - "Let's define our system configuration directly in the notebook. This includes model settings, chunk sizes, and other parameters.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LLM model: gpt-4o\n", - "Embedding model: local:BAAI/bge-small-en-v1.5\n", - "Chunk size: 1024\n", - "โœ“ Configuration setup complete\n" - ] - } - ], - "source": [ - "# Configuration parameters for the RAG system\n", - "CONFIG = {\n", - " \"llm\": {\n", - " \"model\": \"gpt-4o\", # OpenRouter model to use\n", - " \"temperature\": 0.1 # Temperature for response generation\n", - " },\n", - " \"embeddings\": {\n", - " \"model\": \"local:BAAI/bge-small-en-v1.5\", # Local embedding model (no API key needed)\n", - " \"chunk_size\": 1024, # Size of text chunks for processing\n", - " \"chunk_overlap\": 100 # Overlap between consecutive chunks\n", - " },\n", - " \"vector_store\": {\n", - " \"type\": \"lancedb\", # Vector database type\n", - " \"table_name\": \"academic_papers\", # Table name for storing embeddings\n", - " \"path\": \"storage/papers_vectordb\" # Path to vector database\n", - " },\n", - " \"index\": {\n", - " \"storage_path\": \"storage/papers_index\", # Path to store complete index\n", - " \"similarity_top_k\": 5 # Number of similar chunks to retrieve\n", - " },\n", - " \"papers\": {\n", - " \"folder\": \"../papers/agents\" # Path to academic papers folder\n", - " }\n", - "}\n", - "\n", - "def get_config(key_path: str, default_value=None):\n", - " \"\"\"\n", - " Get configuration value using dot notation.\n", - " \n", - " Args:\n", - " key_path (str): Dot-separated 
path to the config value (e.g., 'llm.model')\n", - " default_value: Default value if key not found\n", - " \n", - " Returns:\n", - " Configuration value or default\n", - " \"\"\"\n", - " keys = key_path.split('.')\n", - " value = CONFIG\n", - " \n", - " for key in keys:\n", - " if isinstance(value, dict) and key in value:\n", - " value = value[key]\n", - " else:\n", - " return default_value\n", - " \n", - " return value\n", - "\n", - "# Test configuration access\n", - "llm_model = get_config(\"llm.model\")\n", - "embedding_model = get_config(\"embeddings.model\")\n", - "chunk_size = get_config(\"embeddings.chunk_size\")\n", - "\n", - "print(f\"LLM model: {llm_model}\")\n", - "print(f\"Embedding model: {embedding_model}\")\n", - "print(f\"Chunk size: {chunk_size}\")\n", - "print(\"โœ“ Configuration setup complete\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. LlamaIndex Settings Configuration\n", - "\n", - "LlamaIndex uses global settings for embeddings, LLMs, and document processing. 
We'll use OpenRouter for the LLM and a local embedding model (no API key required).\n" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:47:37,701 - INFO - Load pretrained SentenceTransformer: BAAI/bge-small-en-v1.5\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ LLM configured: gpt-4o (temperature: 0.1)\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:47:42,324 - INFO - 1 prompt is loaded, with the key: query\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Embedding model configured: local:BAAI/bge-small-en-v1.5\n", - "โœ“ Text chunking configured: 1024 chars with 100 overlap\n", - "โœ“ LlamaIndex settings configured successfully\n" - ] - } - ], - "source": [ - "from llama_index.core import Settings\n", - "from llama_index.llms.openrouter import OpenRouter\n", - "from llama_index.core.embeddings import resolve_embed_model\n", - "from llama_index.core.node_parser import SentenceSplitter\n", - "\n", - "def configure_llamaindex_settings():\n", - " \"\"\"\n", - " Configure LlamaIndex global settings for embeddings, LLM, and text processing.\n", - " \"\"\"\n", - " # Set up LLM with OpenRouter\n", - " model = get_config(\"llm.model\")\n", - " temperature = get_config(\"llm.temperature\", 0.1)\n", - " \n", - " Settings.llm = OpenRouter(\n", - " api_key=os.getenv(\"OPENROUTER_API_KEY\"),\n", - " model=model,\n", - " temperature=temperature\n", - " )\n", - " print(f\"โœ“ LLM configured: {model} (temperature: {temperature})\")\n", - "\n", - " # Set up local embedding model (downloads locally first time, then cached)\n", - " embedding_model = get_config(\"embeddings.model\")\n", - " Settings.embed_model = resolve_embed_model(embedding_model)\n", - " print(f\"โœ“ Embedding model configured: {embedding_model}\")\n", - "\n", - " # Set up node 
parser for chunking\n", - " chunk_size = get_config(\"embeddings.chunk_size\")\n", - " chunk_overlap = get_config(\"embeddings.chunk_overlap\")\n", - " \n", - " Settings.node_parser = SentenceSplitter(\n", - " chunk_size=chunk_size, \n", - " chunk_overlap=chunk_overlap\n", - " )\n", - " print(f\"โœ“ Text chunking configured: {chunk_size} chars with {chunk_overlap} overlap\")\n", - "\n", - "# Configure the settings using our hardcoded config\n", - "configure_llamaindex_settings()\n", - "print(\"โœ“ LlamaIndex settings configured successfully\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. Vector Store Setup\n", - "\n", - "We'll use LanceDB as our vector database to store document embeddings. LanceDB is a fast, serverless vector database that's perfect for RAG applications.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Connected to LanceDB at: storage/papers_vectordb\n", - "โœ“ LanceDB vector store created (table: academic_papers)\n", - "โœ“ Vector store setup complete\n" - ] - } - ], - "source": [ - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "\n", - "def create_vector_store():\n", - " \"\"\"\n", - " Create and configure LanceDB vector store using config settings.\n", - " \n", - " Returns:\n", - " LanceDBVectorStore: Configured vector store\n", - " \"\"\"\n", - " try:\n", - " import lancedb\n", - " \n", - " # Get configuration values\n", - " vector_db_path = get_config(\"vector_store.path\")\n", - " table_name = get_config(\"vector_store.table_name\")\n", - " \n", - " # Create storage directory\n", - " Path(vector_db_path).parent.mkdir(parents=True, exist_ok=True)\n", - " \n", - " # Connect to LanceDB\n", - " db = lancedb.connect(str(vector_db_path))\n", - " print(f\"โœ“ Connected to LanceDB at: {vector_db_path}\")\n", - " \n", - " # Create vector store\n", - " vector_store = 
LanceDBVectorStore(\n", - " uri=str(vector_db_path), \n", - " table_name=table_name\n", - " )\n", - " print(f\"โœ“ LanceDB vector store created (table: {table_name})\")\n", - " \n", - " return vector_store\n", - " \n", - " except Exception as e:\n", - " print(f\"Error creating vector store: {e}\")\n", - " return None\n", - "\n", - "# Create the vector store using config\n", - "vector_store = create_vector_store()\n", - "if vector_store:\n", - " print(\"โœ“ Vector store setup complete\")\n", - "else:\n", - " print(\"โŒ Vector store setup failed\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. PDF Processing and Document Loading\n", - "\n", - "Now we'll create functions to load and process PDF files. We'll use LlamaIndex's native `SimpleDirectoryReader` which can handle PDFs directly without needing a custom processor.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loading papers from: ../papers/agents\n", - "โœ“ Loaded 229 documents\n", - "Successfully loaded 229 documents\n", - "First document preview: AI Agents vs. Agentic AI: A Conceptual\n", - "Taxonomy, Applications and Challenges\n", - "Ranjan Sapkotaโˆ—โ€ก, Konstantinos I. 
Roumeliotis โ€ , Manoj Karkee โˆ—โ€ก\n", - "โˆ—Cornell University, Department of Environmental and Biolo...\n", - "First document metadata: {'page_label': '1', 'file_name': 'AI_Agents_vs_Agentic_AI.pdf', 'file_path': '/Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf', 'file_type': 'application/pdf', 'file_size': 3196781, 'creation_date': '2025-09-20', 'last_modified_date': '2025-09-20'}\n" - ] - } - ], - "source": [ - "from llama_index.core import SimpleDirectoryReader\n", - "\n", - "def load_papers_from_folder() -> List:\n", - " \"\"\"\n", - " Load and process all PDF papers from the configured folder using LlamaIndex's native loader.\n", - " \n", - " Returns:\n", - " List[Document]: Processed documents ready for indexing\n", - " \"\"\"\n", - " papers_folder = get_config(\"papers.folder\")\n", - " print(f\"Loading papers from: {papers_folder}\")\n", - " \n", - " papers_path = Path(papers_folder)\n", - " if not papers_path.exists():\n", - " print(f\"Papers folder does not exist: {papers_path}\")\n", - " return []\n", - " \n", - " # Use LlamaIndex's SimpleDirectoryReader to load PDFs\n", - " # This natively handles PDF parsing, text extraction, and metadata\n", - " documents = SimpleDirectoryReader(papers_folder).load_data()\n", - " \n", - " print(f\"โœ“ Loaded {len(documents)} documents\")\n", - " return documents\n", - "\n", - "# Load the papers using config\n", - "documents = load_papers_from_folder()\n", - "if documents:\n", - " print(f\"Successfully loaded {len(documents)} documents\")\n", - " print(f\"First document preview: {documents[0].text[:200]}...\")\n", - " print(f\"First document metadata: {documents[0].metadata}\")\n", - "else:\n", - " print(\"No documents loaded\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 6. Creating the Vector Index\n", - "\n", - "The vector index is the core of our RAG system. 
It chunks documents, generates embeddings, and stores them in the vector database for efficient similarity search.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:47:57,839 - INFO - Loading all indices.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“ Loading existing index...\n", - "Loading llama_index.core.storage.kvstore.simple_kvstore from storage/papers_index/docstore.json.\n", - "Loading llama_index.core.storage.kvstore.simple_kvstore from storage/papers_index/index_store.json.\n", - "โœ“ Successfully loaded existing index\n", - "โœ“ Vector index ready for querying\n" - ] - } - ], - "source": [ - "from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n", - "\n", - "def create_vector_index(documents: List, \n", - " vector_store, \n", - " force_rebuild: bool = False):\n", - " \"\"\"\n", - " Create or load a vector index from documents using config settings.\n", - " \n", - " Args:\n", - " documents (List): Documents to index\n", - " vector_store: LanceDB vector store\n", - " force_rebuild (bool): Force rebuild even if index exists\n", - " \n", - " Returns:\n", - " VectorStoreIndex: The created or loaded index\n", - " \"\"\"\n", - " index_storage_path = get_config(\"index.storage_path\")\n", - " index_path = Path(index_storage_path)\n", - " index_path.mkdir(parents=True, exist_ok=True)\n", - " \n", - " # Check if index already exists\n", - " index_store_file = index_path / \"index_store.json\"\n", - " \n", - " if not force_rebuild and index_store_file.exists():\n", - " print(\"๐Ÿ“ Loading existing index...\")\n", - " try:\n", - " # Recreate storage context with vector store\n", - " storage_context = StorageContext.from_defaults(\n", - " persist_dir=str(index_path), \n", - " vector_store=vector_store\n", - " )\n", - " \n", - " # Load existing index\n", - " index = 
load_index_from_storage(storage_context)\n", - " print(\"โœ“ Successfully loaded existing index\")\n", - " return index\n", - " \n", - " except Exception as e:\n", - " print(f\"โš ๏ธ Error loading existing index: {e}\")\n", - " print(\"Creating new index...\")\n", - " \n", - " if not documents:\n", - " print(\"โŒ No documents to index\")\n", - " return None\n", - " \n", - " print(\"๐Ÿ”จ Creating new vector index...\")\n", - " start_time = time.time()\n", - " \n", - " # Create storage context with vector store\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " \n", - " # Create index with progress bar\n", - " index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context, \n", - " show_progress=True\n", - " )\n", - " \n", - " end_time = time.time()\n", - " print(f\"โœ“ Index created in {end_time - start_time:.2f} seconds\")\n", - " \n", - " # Save index to storage\n", - " print(\"๐Ÿ’พ Saving index to storage...\")\n", - " index.storage_context.persist(persist_dir=str(index_path))\n", - " print(\"โœ“ Index saved successfully\")\n", - " \n", - " return index\n", - "\n", - "# Create the vector index using config\n", - "index = create_vector_index(\n", - " documents=documents, \n", - " vector_store=vector_store, \n", - " force_rebuild=False # Set to True to force rebuild\n", - ")\n", - "\n", - "if index:\n", - " print(\"โœ“ Vector index ready for querying\")\n", - "else:\n", - " print(\"โŒ Failed to create vector index\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 7. Setting Up the Query Engine\n", - "\n", - "The query engine combines a retriever (to find relevant documents) with an LLM (to generate responses). 
This is where the \"Augmented Generation\" part of RAG happens.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Retriever configured to find top 5 similar chunks\n", - "โœ“ Query engine setup successfully\n", - "๐Ÿš€ RAG system is ready for queries!\n" - ] - } - ], - "source": [ - "from llama_index.core.query_engine import RetrieverQueryEngine\n", - "from llama_index.core.retrievers import VectorIndexRetriever\n", - "\n", - "def setup_query_engine(index):\n", - " \"\"\"\n", - " Setup the query engine for semantic search and response generation using config settings.\n", - " \n", - " Args:\n", - " index: The vector index to query\n", - " \n", - " Returns:\n", - " RetrieverQueryEngine: Configured query engine\n", - " \"\"\"\n", - " if not index:\n", - " print(\"โŒ Index not available. Please create index first.\")\n", - " return None\n", - " \n", - " try:\n", - " # Get similarity top k from config\n", - " similarity_top_k = get_config(\"index.similarity_top_k\")\n", - " \n", - " # Create retriever - this finds the most similar document chunks\n", - " retriever = VectorIndexRetriever(\n", - " index=index,\n", - " similarity_top_k=similarity_top_k,\n", - " )\n", - " print(f\"โœ“ Retriever configured to find top {similarity_top_k} similar chunks\")\n", - " \n", - " # Create query engine - this combines retrieval with LLM generation\n", - " query_engine = RetrieverQueryEngine(retriever=retriever)\n", - " print(\"โœ“ Query engine setup successfully\")\n", - " \n", - " return query_engine\n", - " \n", - " except Exception as e:\n", - " print(f\"โŒ Error setting up query engine: {e}\")\n", - " return None\n", - "\n", - "# Setup the query engine using config\n", - "query_engine = setup_query_engine(index)\n", - "\n", - "if query_engine:\n", - " print(\"๐Ÿš€ RAG system is ready for queries!\")\n", - "else:\n", - " print(\"โŒ Failed to setup query 
engine\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“ Title and Author extraction functions loaded successfully!\n" - ] - } - ], - "source": [ - "def extract_paper_title_from_text(text: str, max_length: int = 200) -> str:\n", - " \"\"\"\n", - " Extract the paper title from the document text.\n", - " \n", - " Args:\n", - " text (str): Document text content\n", - " max_length (int): Maximum length for title extraction\n", - " \n", - " Returns:\n", - " str: Extracted title or fallback\n", - " \"\"\"\n", - " if not text:\n", - " return \"Unknown Title\"\n", - " \n", - " # Split into lines and clean them\n", - " lines = [line.strip() for line in text.split('\\n') if line.strip()]\n", - " \n", - " if not lines:\n", - " return \"Unknown Title\"\n", - " \n", - " # Look for title patterns - usually the first substantial line\n", - " # Skip very short lines, page numbers, headers\n", - " for line in lines[:10]: # Check first 10 lines\n", - " # Skip lines that look like headers, page numbers, or metadata\n", - " if (len(line) > 15 and \n", - " not line.isdigit() and \n", - " not line.startswith(('Page', 'arXiv:', 'doi:', 'http', 'www')) and\n", - " not all(c.isupper() or c.isspace() for c in line) and # Skip all-caps headers\n", - " '.' 
in line or len(line) > 30): # Likely a title if it has punctuation or is long\n", - " \n", - " # Clean up the title\n", - " title = line.strip()\n", - " \n", - " # Remove common prefixes/suffixes\n", - " prefixes_to_remove = ['Title:', 'Abstract:', 'Paper:', 'Research:']\n", - " for prefix in prefixes_to_remove:\n", - " if title.startswith(prefix):\n", - " title = title[len(prefix):].strip()\n", - " \n", - " # Truncate if too long\n", - " if len(title) > max_length:\n", - " title = title[:max_length].strip() + \"...\"\n", - " \n", - " return title\n", - " \n", - " # Fallback: use first non-empty line, truncated\n", - " first_line = lines[0] if lines else \"Unknown Title\"\n", - " if len(first_line) > max_length:\n", - " first_line = first_line[:max_length].strip() + \"...\"\n", - " \n", - " return first_line\n", - "\n", - "def extract_paper_authors_from_text(text: str) -> str:\n", - " \"\"\"\n", - " Extract authors from the document text.\n", - " \n", - " Args:\n", - " text (str): Document text content\n", - " \n", - " Returns:\n", - " str: Extracted authors or \"Unknown Authors\"\n", - " \"\"\"\n", - " if not text:\n", - " return \"Unknown Authors\"\n", - " \n", - " lines = [line.strip() for line in text.split('\\n') if line.strip()]\n", - " \n", - " # Look for author patterns in first 20 lines\n", - " for i, line in enumerate(lines[:20]):\n", - " # Skip the title line (usually first substantial line)\n", - " if i == 0:\n", - " continue\n", - " \n", - " # Look for author patterns\n", - " if (len(line) > 5 and \n", - " not line.isdigit() and\n", - " not line.startswith(('Abstract', 'Introduction', 'Page', 'arXiv:', 'doi:', 'http')) and\n", - " ('University' in line or 'Institute' in line or \n", - " ',' in line or 'Department' in line or\n", - " '@' in line or # Email addresses often indicate authors\n", - " any(char.isupper() for char in line))): # Names often have capitals\n", - " \n", - " # Clean up author line\n", - " authors = line.strip()\n", - " \n", - " # 
Remove common prefixes\n", - " prefixes_to_remove = ['Authors:', 'By:', 'Author:']\n", - " for prefix in prefixes_to_remove:\n", - " if authors.startswith(prefix):\n", - " authors = authors[len(prefix):].strip()\n", - " \n", - " # Truncate if too long\n", - " if len(authors) > 150:\n", - " authors = authors[:150].strip() + \"...\"\n", - " \n", - " return authors\n", - " \n", - " return \"Unknown Authors\"\n", - "\n", - "print(\"๐Ÿ“ Title and Author extraction functions loaded successfully!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 8. Search and Query Functions\n", - "\n", - "Now let's create functions to search through our academic papers and extract detailed information about sources and metadata.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ”„ Re-processing papers with improved title extraction...\n", - "๐Ÿ“š Found 5 unique papers in the index:\n", - "================================================================================\n", - "1. ๐Ÿ“„ AI Agents vs. Agentic AI: A Conceptual\n", - " ๐Ÿ‘ฅ Authors: Taxonomy, Applications and Challenges\n", - " ๐Ÿ“ File: AI_Agents_vs_Agentic_AI\n", - " ๐Ÿ’พ Size: 3.05 MB\n", - " ๐Ÿ“– Page: 1\n", - "------------------------------------------------------------\n", - "2. ๐Ÿ“„ THE LANDSCAPE OF EMERGING AI AGENT ARCHITECTURES\n", - " ๐Ÿ‘ฅ Authors: FOR REASONING , PLANNING , AND TOOL CALLING : A S URVEY\n", - " ๐Ÿ“ File: Emerging_Agent_Architectures\n", - " ๐Ÿ’พ Size: 1.58 MB\n", - " ๐Ÿ“– Page: 1\n", - "------------------------------------------------------------\n", - "3. ๐Ÿ“„ From LLM Reasoning to Autonomous AI Agents:\n", - " ๐Ÿ‘ฅ Authors: From LLM Reasoning to Autonomous AI Agents:\n", - " ๐Ÿ“ File: LLMReasoning_to_Autonomous_Agents\n", - " ๐Ÿ’พ Size: 16.21 MB\n", - " ๐Ÿ“– Page: 1\n", - "------------------------------------------------------------\n", - "4. 
๐Ÿ“„ The Rise and Potential of Large Language Model\n", - " ๐Ÿ‘ฅ Authors: Based Agents: A Survey\n", - " ๐Ÿ“ File: Rise_and_Potential_LLM_Agents\n", - " ๐Ÿ’พ Size: 6.52 MB\n", - " ๐Ÿ“– Page: 1\n", - "------------------------------------------------------------\n", - "5. ๐Ÿ“„ A Comprehensive Survey of Self-Evolving AI Agents\n", - " ๐Ÿ‘ฅ Authors: A New Paradigm Bridging Foundation Models and Lifelong Agentic Systems\n", - " ๐Ÿ“ File: survey_of_self_evolving_agents\n", - " ๐Ÿ’พ Size: 10.56 MB\n", - " ๐Ÿ“– Page: 1\n", - "------------------------------------------------------------\n", - "\n", - "โœ… Successfully extracted titles for 5 papers!\n" - ] - } - ], - "source": [ - "# Updated list_indexed_papers function with title extraction\n", - "def list_indexed_papers_improved(documents: List) -> List[Dict[str, any]]:\n", - " \"\"\"\n", - " List all papers that have been indexed with their metadata.\n", - " Extracts actual paper titles and authors from document content.\n", - " \n", - " Args:\n", - " documents (List): List of loaded documents\n", - " \n", - " Returns:\n", - " List[Dict[str, any]]: List of paper information\n", - " \"\"\"\n", - " papers = []\n", - " processed_files = set() # Track unique files to avoid duplicates\n", - " \n", - " for doc in documents:\n", - " try:\n", - " metadata = doc.metadata\n", - " file_path = metadata.get(\"file_path\", \"\")\n", - " file_name = Path(file_path).stem if file_path else \"Unknown\"\n", - " \n", - " # Skip if we've already processed this file\n", - " if file_path in processed_files:\n", - " continue\n", - " processed_files.add(file_path)\n", - " \n", - " # Extract title and authors from document text\n", - " doc_text = doc.text if hasattr(doc, 'text') else \"\"\n", - " extracted_title = extract_paper_title_from_text(doc_text)\n", - " extracted_authors = extract_paper_authors_from_text(doc_text)\n", - " \n", - " paper_info = {\n", - " \"file_name\": file_name,\n", - " \"file_path\": file_path,\n", - " \"title\": 
extracted_title,\n", - " \"authors\": extracted_authors,\n", - " \"page_count\": metadata.get(\"page_count\", 0),\n", - " \"file_size\": metadata.get(\"file_size\", 0),\n", - " \"file_size_mb\": round(metadata.get(\"file_size\", 0) / (1024 * 1024), 2) if metadata.get(\"file_size\") else 0,\n", - " \"total_pages\": metadata.get(\"total_pages\", \"Unknown\"),\n", - " \"page_label\": metadata.get(\"page_label\", \"\"),\n", - " }\n", - " \n", - " papers.append(paper_info)\n", - " \n", - " except Exception as e:\n", - " print(f\"Error processing document: {e}\")\n", - " \n", - " return papers\n", - "\n", - "# Re-list papers with improved title extraction\n", - "print(\"๐Ÿ”„ Re-processing papers with improved title extraction...\")\n", - "papers_list_improved = list_indexed_papers_improved(documents)\n", - "\n", - "print(f\"๐Ÿ“š Found {len(papers_list_improved)} unique papers in the index:\")\n", - "print(\"=\" * 80)\n", - "\n", - "for i, paper in enumerate(papers_list_improved[:10], 1): # Show first 10 papers\n", - " print(f\"{i}. ๐Ÿ“„ {paper['title']}\")\n", - " print(f\" ๐Ÿ‘ฅ Authors: {paper['authors']}\")\n", - " print(f\" ๐Ÿ“ File: {paper['file_name']}\")\n", - " print(f\" ๐Ÿ’พ Size: {paper['file_size_mb']} MB\")\n", - " if paper.get('page_label'):\n", - " print(f\" ๐Ÿ“– Page: {paper['page_label']}\")\n", - " print(\"-\" * 60)\n", - "\n", - "print(f\"\\nโœ… Successfully extracted titles for {len(papers_list_improved)} papers!\")\n", - "if len(papers_list_improved) > 10:\n", - " print(f\"๐Ÿ“ Showing first 10 papers. 
Total: {len(papers_list_improved)} papers available.\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ” Enhanced search function with title extraction loaded!\n" - ] - } - ], - "source": [ - "# Enhanced search function with improved metadata extraction\n", - "def search_papers_improved(query_engine, query: str, include_metadata: bool = True) -> Dict[str, any]:\n", - " \"\"\"\n", - " Search for relevant papers based on the query with improved title extraction.\n", - " \n", - " Args:\n", - " query_engine: The configured query engine\n", - " query (str): Search query\n", - " include_metadata (bool): Whether to include detailed metadata\n", - " \n", - " Returns:\n", - " Dict[str, any]: Search results with response and sources\n", - " \"\"\"\n", - " if not query_engine:\n", - " return {\n", - " \"success\": False,\n", - " \"error\": \"Query engine not initialized.\",\n", - " \"response\": \"\",\n", - " \"sources\": [],\n", - " }\n", - " \n", - " try:\n", - " print(f\"๐Ÿ” Searching for: '{query}'\")\n", - " start_time = time.time()\n", - " \n", - " # Query the RAG system\n", - " response = query_engine.query(query)\n", - " \n", - " end_time = time.time()\n", - " \n", - " # Extract source information from retrieved nodes with title extraction\n", - " sources = []\n", - " if hasattr(response, \"source_nodes\"):\n", - " for node in response.source_nodes:\n", - " # Extract title from the node text\n", - " node_text = node.text if hasattr(node, 'text') else \"\"\n", - " extracted_title = extract_paper_title_from_text(node_text, max_length=100)\n", - " extracted_authors = extract_paper_authors_from_text(node_text)\n", - " \n", - " source_info = {\n", - " \"text\": (\n", - " node.text[:500] + \"...\"\n", - " if len(node.text) > 500\n", - " else node.text\n", - " ),\n", - " \"score\": getattr(node, \"score\", 0.0),\n", - " \"extracted_title\": 
extracted_title,\n", - " \"extracted_authors\": extracted_authors,\n", - " }\n", - " \n", - " # Add metadata if available and requested\n", - " if include_metadata and hasattr(node, \"metadata\"):\n", - " metadata = node.metadata\n", - " source_info.update({\n", - " \"file_name\": metadata.get(\"file_name\", \"Unknown\"),\n", - " \"file_path\": metadata.get(\"file_path\", \"\"),\n", - " \"page_label\": metadata.get(\"page_label\", \"\"),\n", - " \"file_size_mb\": round(metadata.get(\"file_size\", 0) / (1024 * 1024), 2) if metadata.get(\"file_size\") else 0,\n", - " })\n", - " \n", - " sources.append(source_info)\n", - " \n", - " result = {\n", - " \"success\": True,\n", - " \"response\": str(response),\n", - " \"sources\": sources,\n", - " \"query\": query,\n", - " \"search_time\": end_time - start_time,\n", - " \"num_sources\": len(sources),\n", - " }\n", - " \n", - " print(f\"โœ“ Search completed in {end_time - start_time:.2f} seconds\")\n", - " print(f\"๐Ÿ“š Found {len(sources)} relevant sources\")\n", - " \n", - " return result\n", - " \n", - " except Exception as e:\n", - " print(f\"โŒ Error during search: {e}\")\n", - " return {\"success\": False, \"error\": str(e), \"response\": \"\", \"sources\": []}\n", - "\n", - "print(\"๐Ÿ” Enhanced search function with title extraction loaded!\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿงช Testing improved search with title extraction:\n", - "================================================================================\n", - "โ“ Question: What are the main architectural patterns for agent systems?\n", - "================================================================================\n", - "๐Ÿ” Searching for: 'What are the main architectural patterns for agent systems?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:48:45,000 - INFO - query_type 
:, vector\n", - "2025-09-20 12:48:48,541 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:48:52,913 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:48:54,757 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 11.76 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "๐Ÿ’ก Answer:\n", - "Agent systems are typically structured around several key components that enable autonomous decision-making and execution. The core of an agent system is the Foundation Model, often a large language model (LLM), which acts as the central reasoning engine. This model interprets instructions, generates plans, and produces actionable responses. Supporting modules enhance the agent's capabilities in complex environments. These include the Perception Module, which processes sensory-like data to build a suitable representation for reasoning; the Planning Module, which decomposes tasks into actionable sub-tasks and guides their execution; and the Memory Module, which retains and recalls past experiences for context-aware reasoning and long-term consistency. These architectural patterns allow agents to effectively reason, plan, and interact with their environments.\n", - "\n", - "๐Ÿ“Š Search completed in 11.76 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "\n", - "๐Ÿ“– Source Details:\n", - "------------------------------------------------------------\n", - "\n", - "1. 
๐Ÿ“„ Paper: AI enhances this base by integrating advanced components\n", - " ๐Ÿ‘ฅ Authors: AI enhances this base by integrating advanced components\n", - " ๐Ÿ“ File: AI_Agents_vs_Agentic_AI.pdf\n", - " ๐Ÿ“– Page: 11\n", - " ๐ŸŽฏ Relevance Score: 0.730\n", - " ๐Ÿ“ Text Preview: Agentic\n", - "AI enhances this base by integrating advanced components\n", - "such as Specialized Agents, Advanced Reasoning & Plan-\n", - "ning, Persistent Memory, and Orchestration. The figure further\n", - "emphasizes emerge...\n", - "\n", - "2. ๐Ÿ“„ Paper: Fig. 2: Mindmap of Research Questions relevant to AI\n", - " ๐Ÿ‘ฅ Authors: Agentic AI\n", - " ๐Ÿ“ File: AI_Agents_vs_Agentic_AI.pdf\n", - " ๐Ÿ“– Page: 3\n", - " ๐ŸŽฏ Relevance Score: 0.685\n", - " ๐Ÿ“ Text Preview: AI Agents\n", - "&\n", - "Agentic AI\n", - "Architecture\n", - "Mechanisms\n", - "Scope/\n", - "Complexity\n", - "Interaction\n", - "Autonomy\n", - "Fig. 2: Mindmap of Research Questions relevant to AI\n", - "Agents and Agentic AI. Each color-coded branch represents\n", - "a k...\n", - "\n", - "3. ๐Ÿ“„ Paper: tural distinction underpins profound differences in scalability,\n", - " ๐Ÿ‘ฅ Authors: tural distinction underpins profound differences in scalability,\n", - " ๐Ÿ“ File: AI_Agents_vs_Agentic_AI.pdf\n", - " ๐Ÿ“– Page: 2\n", - " ๐ŸŽฏ Relevance Score: 0.685\n", - " ๐Ÿ“ Text Preview: This architec-\n", - "tural distinction underpins profound differences in scalability,\n", - "adaptability, and application scope.\n", - "Understanding and formalizing the taxonomy between these\n", - "two paradigms (AI Agents a...\n", - "\n", - "4. ๐Ÿ“„ Paper: task. In single agent patterns there is no feedback mechanism from other AI agents; however, there m...\n", - " ๐Ÿ‘ฅ Authors: Multi-Agent Architectures. These architectures involve two or more agents, where each agent can utilize the same\n", - " ๐Ÿ“ File: Emerging_Agent_Architectures.pdf\n", - " ๐Ÿ“– Page: 3\n", - " ๐ŸŽฏ Relevance Score: 0.684\n", - " ๐Ÿ“ Text Preview: task. 
In single agent patterns there is no feedback mechanism from other AI agents; however, there may be options for\n", - "humans to provide feedback that guides the agent.\n", - "Multi-Agent Architectures. These...\n", - "\n", - "5. ๐Ÿ“„ Paper: agentic systems.\n", - " ๐Ÿ‘ฅ Authors: The remainder of this survey is organised as follows. Section 2 presents preliminaries on AI agents and\n", - " ๐Ÿ“ File: survey_of_self_evolving_agents.pdf\n", - " ๐Ÿ“– Page: 6\n", - " ๐ŸŽฏ Relevance Score: 0.668\n", - " ๐Ÿ“ Text Preview: agentic systems.\n", - "The remainder of this survey is organised as follows. Section 2 presents preliminaries on AI agents and\n", - "multi-agent systems, including their definitions, key components, representativ...\n" - ] - } - ], - "source": [ - "# Test the improved search with title extraction\n", - "def ask_question_improved(query_engine, question: str, show_sources: bool = True):\n", - " \"\"\"\n", - " Ask a custom question to the RAG system with improved title extraction.\n", - " \n", - " Args:\n", - " query_engine: The configured query engine\n", - " question (str): Your question about the papers\n", - " show_sources (bool): Whether to display source information\n", - " \"\"\"\n", - " print(f\"โ“ Question: {question}\")\n", - " print(\"=\" * 80)\n", - " \n", - " result = search_papers_improved(query_engine, question, include_metadata=True)\n", - " \n", - " if result[\"success\"]:\n", - " print(f\"๐Ÿ’ก Answer:\")\n", - " print(result[\"response\"])\n", - " print(f\"\\n๐Ÿ“Š Search completed in {result['search_time']:.2f} seconds\")\n", - " print(f\"๐Ÿ“š Found {result['num_sources']} relevant sources\")\n", - " \n", - " if show_sources and result[\"sources\"]:\n", - " print(f\"\\n๐Ÿ“– Source Details:\")\n", - " print(\"-\" * 60)\n", - " for i, source in enumerate(result[\"sources\"], 1):\n", - " print(f\"\\n{i}. 
๐Ÿ“„ Paper: {source.get('extracted_title', 'Unknown Title')}\")\n", - " print(f\" ๐Ÿ‘ฅ Authors: {source.get('extracted_authors', 'Unknown Authors')}\")\n", - " print(f\" ๐Ÿ“ File: {source.get('file_name', 'Unknown')}\")\n", - " if source.get('page_label'):\n", - " print(f\" ๐Ÿ“– Page: {source['page_label']}\")\n", - " print(f\" ๐ŸŽฏ Relevance Score: {source.get('score', 0):.3f}\")\n", - " print(f\" ๐Ÿ“ Text Preview: {source['text'][:200]}...\")\n", - " \n", - " else:\n", - " print(f\"โŒ Error: {result['error']}\")\n", - "\n", - "# Test with the improved version\n", - "test_question = \"What are the main architectural patterns for agent systems?\"\n", - "\n", - "if query_engine:\n", - " print(\"๐Ÿงช Testing improved search with title extraction:\")\n", - " print(\"=\" * 80)\n", - " ask_question_improved(query_engine, test_question, show_sources=True)\n", - "else:\n", - " print(\"โŒ Query engine not available\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ” Searching for: 'What are the main types of AI agents discussed in these papers?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:49:54,494 - INFO - query_type :, vector\n", - "2025-09-20 12:49:57,093 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:50:00,620 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:50:04,316 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 12.60 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "\n", - "๐Ÿ“ Response Preview: The primary types of AI agents discussed are AI Agents and Agentic AI. 
AI Agents are characterized as modular systems focused on narrow, task-specific automation, leveraging large language models (LLMs) and language interface models (LIMs) for tasks such as customer support, email filtering, and personalized content recommendation. In contrast, Agentic AI represents a more advanced paradigm, featuring multi-agent collaboration, dynamic task decomposition, and orchestrated autonomy, with applications in areas like research automation, robotic coordination, and medical decision-making.\n", - "๐Ÿ“Š Number of sources: 5\n", - "โฑ๏ธ Search time: 12.60 seconds\n" - ] - } - ], - "source": [ - "def search_papers(query_engine, query: str, include_metadata: bool = True) -> Dict[str, any]:\n", - " \"\"\"\n", - " Search for relevant papers based on the query.\n", - " \n", - " Args:\n", - " query_engine: The configured query engine\n", - " query (str): Search query\n", - " include_metadata (bool): Whether to include detailed metadata\n", - " \n", - " Returns:\n", - " Dict[str, any]: Search results with response and sources\n", - " \"\"\"\n", - " if not query_engine:\n", - " return {\n", - " \"success\": False,\n", - " \"error\": \"Query engine not initialized.\",\n", - " \"response\": \"\",\n", - " \"sources\": [],\n", - " }\n", - " \n", - " try:\n", - " print(f\"๐Ÿ” Searching for: '{query}'\")\n", - " start_time = time.time()\n", - " \n", - " # Query the RAG system\n", - " response = query_engine.query(query)\n", - " \n", - " end_time = time.time()\n", - " \n", - " # Extract source information from retrieved nodes\n", - " sources = []\n", - " if hasattr(response, \"source_nodes\"):\n", - " for node in response.source_nodes:\n", - " source_info = {\n", - " \"text\": (\n", - " node.text[:500] + \"...\"\n", - " if len(node.text) > 500\n", - " else node.text\n", - " ),\n", - " \"score\": getattr(node, \"score\", 0.0),\n", - " }\n", - " \n", - " # Add metadata if available and requested\n", - " if include_metadata and hasattr(node, 
\"metadata\"):\n", - " metadata = node.metadata\n", - " source_info.update({\n", - " \"file_name\": metadata.get(\"file_name\", \"Unknown\"),\n", - " \"title\": metadata.get(\"title\", \"Unknown Title\"),\n", - " \"authors\": metadata.get(\"authors\", \"Unknown Authors\"),\n", - " \"page_count\": metadata.get(\"page_count\", 0),\n", - " \"has_abstract\": metadata.get(\"has_abstract\", False),\n", - " })\n", - " \n", - " sources.append(source_info)\n", - " \n", - " result = {\n", - " \"success\": True,\n", - " \"response\": str(response),\n", - " \"sources\": sources,\n", - " \"query\": query,\n", - " \"search_time\": end_time - start_time,\n", - " \"num_sources\": len(sources),\n", - " }\n", - " \n", - " print(f\"โœ“ Search completed in {end_time - start_time:.2f} seconds\")\n", - " print(f\"๐Ÿ“š Found {len(sources)} relevant sources\")\n", - " \n", - " return result\n", - " \n", - " except Exception as e:\n", - " print(f\"โŒ Error during search: {e}\")\n", - " return {\"success\": False, \"error\": str(e), \"response\": \"\", \"sources\": []}\n", - "\n", - "# Test the search function with a sample query\n", - "test_query = \"What are the main types of AI agents discussed in these papers?\"\n", - "result = search_papers(query_engine, test_query)\n", - "\n", - "if result[\"success\"]:\n", - " print(f\"\\n๐Ÿ“ Response Preview: {result['response']}\")\n", - " print(f\"๐Ÿ“Š Number of sources: {result['num_sources']}\")\n", - " print(f\"โฑ๏ธ Search time: {result['search_time']:.2f} seconds\")\n", - "else:\n", - " print(f\"โŒ Search failed: {result['error']}\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 9. 
Paper Information and Metadata\n", - "\n", - "Let's create functions to list and get detailed information about the papers in our index.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“š Found 229 papers in the index:\n", - "============================================================\n", - "1. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "2. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "3. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "4. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "5. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "6. 
AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "7. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "8. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "9. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "10. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "11. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "12. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "13. 
AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "14. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "15. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "16. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "17. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "18. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "19. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "20. 
AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "21. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "22. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "23. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "24. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "25. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "26. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "27. 
AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "28. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "29. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "30. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "31. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "32. AI_Agents_vs_Agentic_AI\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/AI_Agents_vs_Agentic_AI.pdf\n", - " Total Pages: Unknown\n", - " Size: 3.05 MB\n", - "----------------------------------------\n", - "33. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "34. 
Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "35. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "36. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "37. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "38. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "39. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "40. 
Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "41. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "42. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "43. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "44. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "45. Emerging_Agent_Architectures\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Emerging_Agent_Architectures.pdf\n", - " Total Pages: Unknown\n", - " Size: 1.58 MB\n", - "----------------------------------------\n", - "46. 
LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "47. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "48. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "49. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "50. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "51. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "52. 
LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "53. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "54. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "55. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "56. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "57. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "58. 
LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "59. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "60. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "61. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "62. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "63. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "64. 
LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "65. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "66. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "67. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "68. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "69. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "70. 
LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "71. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "72. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "73. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "74. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "75. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "76. 
LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "77. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "78. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "79. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "80. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "81. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "82. 
LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "83. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "84. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "85. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "86. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "87. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "88. 
LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "89. LLMReasoning_to_Autonomous_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/LLMReasoning_to_Autonomous_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 16.21 MB\n", - "----------------------------------------\n", - "90. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "91. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "92. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "93. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "94. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "95. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "96. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "97. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "98. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "99. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "100. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "101. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "102. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "103. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "104. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "105. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "106. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "107. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "108. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "109. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "110. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "111. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "112. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "113. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "114. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "115. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "116. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "117. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "118. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "119. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "120. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "121. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "122. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "123. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "124. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "125. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "126. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "127. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "128. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "129. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "130. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "131. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "132. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "133. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "134. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "135. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "136. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "137. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "138. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "139. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "140. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "141. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "142. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "143. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "144. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "145. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "146. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "147. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "148. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "149. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "150. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "151. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "152. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "153. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "154. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "155. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "156. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "157. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "158. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "159. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "160. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "161. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "162. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "163. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "164. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "165. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "166. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "167. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "168. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "169. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "170. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "171. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "172. 
Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "173. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "174. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "175. Rise_and_Potential_LLM_Agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/Rise_and_Potential_LLM_Agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 6.52 MB\n", - "----------------------------------------\n", - "176. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "177. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "178. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "179. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "180. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "181. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "182. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "183. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "184. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "185. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "186. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "187. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "188. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "189. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "190. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "191. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "192. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "193. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "194. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "195. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "196. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "197. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "198. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "199. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "200. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "201. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "202. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "203. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "204. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "205. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "206. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "207. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "208. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "209. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "210. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "211. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "212. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "213. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "214. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "215. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "216. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "217. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "218. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "219. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "220. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "221. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "222. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "223. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "224. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "225. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "226. 
survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "227. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "228. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n", - "229. survey_of_self_evolving_agents\n", - " File Path: /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/../papers/agents/survey_of_self_evolving_agents.pdf\n", - " Total Pages: Unknown\n", - " Size: 10.56 MB\n", - "----------------------------------------\n" - ] - } - ], - "source": [ - "def list_indexed_papers(documents: List) -> List[Dict[str, any]]:\n", - " \"\"\"\n", - " List all papers that have been indexed with their metadata.\n", - " \n", - " Args:\n", - " documents (List): List of loaded documents\n", - " \n", - " Returns:\n", - " List[Dict[str, any]]: List of paper information\n", - " \"\"\"\n", - " papers = []\n", - " \n", - " for doc in documents:\n", - " try:\n", - " metadata = doc.metadata\n", - " file_path = metadata.get(\"file_path\", \"\")\n", - " file_name = Path(file_path).stem if file_path else \"Unknown\"\n", - " \n", - " paper_info = {\n", - " \"file_name\": file_name,\n", - " \"file_path\": file_path,\n", - " \"title\": metadata.get(\"title\", file_name),\n", - " \"authors\": metadata.get(\"authors\", \"Unknown\"),\n", - " \"page_count\": 
metadata.get(\"page_count\", 0),\n", - " \"file_size\": metadata.get(\"file_size\", 0),\n", - " \"file_size_mb\": round(metadata.get(\"file_size\", 0) / (1024 * 1024), 2) if metadata.get(\"file_size\") else 0,\n", - " \"total_pages\": metadata.get(\"total_pages\", \"Unknown\"),\n", - " }\n", - " \n", - " papers.append(paper_info)\n", - " \n", - " except Exception as e:\n", - " print(f\"Error processing document: {e}\")\n", - " \n", - " return papers\n", - "\n", - "# List all indexed papers\n", - "papers_list = list_indexed_papers(documents)\n", - "\n", - "print(f\"๐Ÿ“š Found {len(papers_list)} papers in the index:\")\n", - "print(\"=\" * 60)\n", - "\n", - "for i, paper in enumerate(papers_list, 1):\n", - " print(f\"{i}. {paper['file_name']}\")\n", - " print(f\" File Path: {paper['file_path']}\")\n", - " print(f\" Total Pages: {paper.get('total_pages', 'Unknown')}\")\n", - " print(f\" Size: {paper['file_size_mb']} MB\")\n", - " print(\"-\" * 40)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 11. Advanced Query Examples\n", - "\n", - "Now let's test our RAG system with various types of research queries to demonstrate its capabilities.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿงช Running Example Queries\n", - "============================================================\n", - "\n", - "1. 
Agent Types\n", - "Q: What are the main types of AI agents discussed in these papers?\n", - "--------------------------------------------------\n", - "๐Ÿ” Searching for: 'What are the main types of AI agents discussed in these papers?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:50:35,859 - INFO - query_type :, vector\n", - "2025-09-20 12:50:38,020 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:50:41,718 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:50:45,508 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 10.96 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "A: The main types of AI agents discussed are AI Agents and Agentic AI. AI Agents are modular systems designed for narrow, task-specific automation, leveraging large language models (LLMs) and large information models (LIMs) for applications like customer support, email filtering, and personalized content recommendation. Agentic AI, however, signifies a more advanced paradigm with features like multi-...\n", - "๐Ÿ“š Sources: 5 | โฑ๏ธ Time: 10.96s\n", - "\n", - "\n", - "2. 
Technical Comparison\n", - "Q: How do LLM-based agents differ from traditional AI agents?\n", - "--------------------------------------------------\n", - "๐Ÿ” Searching for: 'How do LLM-based agents differ from traditional AI agents?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:50:46,942 - INFO - query_type :, vector\n", - "2025-09-20 12:50:48,038 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:50:51,488 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:50:53,684 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:50:55,402 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 8.80 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "A: LLM-based agents differ from traditional AI agents by incorporating large language models with modular toolkits, which facilitate autonomous decision-making and multi-step reasoning. This integration enables LLM-based agents to execute a diverse array of tasks across multiple domains, including materials science, biomedical research, and software engineering. Additionally, they support agent-to-ag...\n", - "๐Ÿ“š Sources: 5 | โฑ๏ธ Time: 8.80s\n", - "\n", - "\n", - "3. 
Challenges\n", - "Q: What are the current challenges in developing autonomous agents?\n", - "--------------------------------------------------\n", - "๐Ÿ” Searching for: 'What are the current challenges in developing autonomous agents?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:50:56,932 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:50:58,852 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:51:02,156 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:51:03,545 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 8.20 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "A: The current challenges in developing autonomous agents include addressing limitations such as a lack of causal reasoning, constraints from large language models like hallucinations and shallow reasoning, incomplete agentic properties such as autonomy and proactivity, and difficulties in long-horizon planning and recovery. Additional challenges involve inter-agent error cascades, coordination break...\n", - "๐Ÿ“š Sources: 5 | โฑ๏ธ Time: 8.20s\n", - "\n", - "\n", - "4. 
Evaluation\n", - "Q: What evaluation methods are used for AI agents?\n", - "--------------------------------------------------\n", - "๐Ÿ” Searching for: 'What evaluation methods are used for AI agents?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:51:04,575 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:51:08,270 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 4.67 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "A: AI agents are evaluated using a variety of methods that focus on different aspects of their performance. These include benchmark-based evaluations that assess task completion, reasoning quality, and generalization ability. Specific benchmarks are used for tool and API-driven agents, web navigation and browsing agents, and multi-agent collaboration. These benchmarks often involve structured tasks w...\n", - "๐Ÿ“š Sources: 5 | โฑ๏ธ Time: 4.67s\n", - "\n", - "\n", - "5. 
Architecture\n", - "Q: Describe the common architectural patterns for agent systems.\n", - "--------------------------------------------------\n", - "๐Ÿ” Searching for: 'Describe the common architectural patterns for agent systems.'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:51:09,390 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:51:11,772 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:51:13,412 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:51:14,543 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 6.32 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "A: Common architectural patterns for agent systems include single-agent and multi-agent architectures. Single-agent patterns typically involve a defined persona and set of tools, with opportunities for human feedback and iterative goal achievement. Multi-agent architectures can be categorized into vertical and horizontal structures. Vertical architectures have a lead agent with other agents reporting...\n", - "๐Ÿ“š Sources: 5 | โฑ๏ธ Time: 6.32s\n", - "\n", - "\n", - "6. 
Applications\n", - "Q: What are the practical applications of AI agents mentioned in the literature?\n", - "--------------------------------------------------\n", - "๐Ÿ” Searching for: 'What are the practical applications of AI agents mentioned in the literature?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:51:15,441 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:51:17,999 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:51:20,486 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 8.23 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "A: AI agents have practical applications in enterprise settings, including customer support, email filtering, personalized content recommendation, and autonomous scheduling. They enhance customer engagement and business intelligence by inferring user preferences and generating personalized suggestions. In analytics, AI agents enable natural-language data queries and automated report generation. 
Auton...\n", - "๐Ÿ“š Sources: 5 | โฑ๏ธ Time: 8.23s\n", - "\n" - ] - } - ], - "source": [ - "def run_example_queries(query_engine):\n", - " \"\"\"\n", - " Run a series of example queries to demonstrate RAG capabilities.\n", - " \n", - " Args:\n", - " query_engine: The configured query engine\n", - " \"\"\"\n", - " example_queries = [\n", - " {\n", - " \"category\": \"Agent Types\",\n", - " \"query\": \"What are the main types of AI agents discussed in these papers?\",\n", - " },\n", - " {\n", - " \"category\": \"Technical Comparison\", \n", - " \"query\": \"How do LLM-based agents differ from traditional AI agents?\",\n", - " },\n", - " {\n", - " \"category\": \"Challenges\",\n", - " \"query\": \"What are the current challenges in developing autonomous agents?\",\n", - " },\n", - " {\n", - " \"category\": \"Evaluation\",\n", - " \"query\": \"What evaluation methods are used for AI agents?\",\n", - " },\n", - " {\n", - " \"category\": \"Architecture\",\n", - " \"query\": \"Describe the common architectural patterns for agent systems.\",\n", - " },\n", - " {\n", - " \"category\": \"Applications\",\n", - " \"query\": \"What are the practical applications of AI agents mentioned in the literature?\",\n", - " },\n", - " ]\n", - " \n", - " print(\"๐Ÿงช Running Example Queries\")\n", - " print(\"=\" * 60)\n", - " \n", - " for i, example in enumerate(example_queries, 1):\n", - " print(f\"\\n{i}. 
{example['category']}\")\n", - " print(f\"Q: {example['query']}\")\n", - " print(\"-\" * 50)\n", - " \n", - " result = search_papers(query_engine, example[\"query\"])\n", - " \n", - " if result[\"success\"]:\n", - " # Display truncated response\n", - " response = result[\"response\"]\n", - " if len(response) > 400:\n", - " response = response[:400] + \"...\"\n", - " \n", - " print(f\"A: {response}\")\n", - " print(f\"๐Ÿ“š Sources: {result['num_sources']} | โฑ๏ธ Time: {result['search_time']:.2f}s\")\n", - " else:\n", - " print(f\"โŒ Error: {result['error']}\")\n", - " \n", - " print()\n", - "\n", - "# Run the example queries\n", - "if query_engine:\n", - " run_example_queries(query_engine)\n", - "else:\n", - " print(\"โŒ Query engine not available for examples\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 12. Interactive Query Interface\n", - "\n", - "Let's create an interactive function that allows you to ask custom questions about the papers.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โ“ Question: What are the key ethical considerations for AI agents?\n", - "============================================================\n", - "๐Ÿ” Searching for: 'What are the key ethical considerations for AI agents?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:51:51,888 - INFO - query_type :, vector\n", - "2025-09-20 12:51:54,164 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 4.09 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "๐Ÿ’ก Answer:\n", - "Key ethical considerations for AI agents include ensuring accountability, fairness, and value alignment. 
These considerations are crucial due to the distributed and autonomous nature of AI systems, which can create accountability gaps when multiple agents interact to produce an outcome. Additionally, there is a need to address bias propagation and amplification, as agents trained on biased data may reinforce skewed decisions, leading to systemic inequities. Ethical governance frameworks are essential to ensure responsible deployment, defining accountability, oversight, and value alignment across autonomous agent networks.\n", - "\n", - "๐Ÿ“Š Search completed in 4.09 seconds\n", - "๐Ÿ“š Found 5 relevant sources\n", - "\n", - "๐Ÿ“– Source Details:\n", - "----------------------------------------\n", - "\n", - "1. Score: 0.668\n", - " Text: Lastly, to build user confidence, agents must\n", - "prioritize Trust & Safety mechanisms through verifiable out-\n", - "put logging, bias detection, and ethical guardrails especially\n", - "as their autonomy increases. T...\n", - "\n", - "2. Score: 0.650\n", - " Text: for errors or unintended consequences. This ambiguity\n", - "complicates legal liability, regulatory compliance, and\n", - "user trust, especially in domains such as healthcare,\n", - "finance, or defense. Furthermore, bi...\n", - "\n", - "3. Score: 0.613\n", - " Text: AI Agents vs. Agentic AI: A Conceptual\n", - "Taxonomy, Applications and Challenges\n", - "Ranjan Sapkotaโˆ—โ€ก, Konstantinos I. Roumeliotis โ€ , Manoj Karkee โˆ—โ€ก\n", - "โˆ—Cornell University, Department of Environmental and Biolo...\n", - "\n", - "4. Score: 0.609\n", - " Text: These laws are hierarchical: the Second cannot override\n", - "the First, and the Third cannot override the First or Second. Although conceived as fictional moral constraints, they have become\n", - "influential in...\n", - "\n", - "5. 
Score: 0.608\n", - " Text: For\n", - "example, a fact-checking agent fed with tampered data\n", - "could unintentionally legitimize false claims, which are\n", - "then integrated into downstream reasoning by summa-\n", - "rization or decision-making agent...\n" - ] - } - ], - "source": [ - "def ask_question(query_engine, question: str, show_sources: bool = True):\n", - " \"\"\"\n", - " Ask a custom question to the RAG system and display results.\n", - " \n", - " Args:\n", - " query_engine: The configured query engine\n", - " question (str): Your question about the papers\n", - " show_sources (bool): Whether to display source information\n", - " \"\"\"\n", - " print(f\"โ“ Question: {question}\")\n", - " print(\"=\" * 60)\n", - " \n", - " result = search_papers(query_engine, question, include_metadata=True)\n", - " \n", - " if result[\"success\"]:\n", - " print(f\"๐Ÿ’ก Answer:\")\n", - " print(result[\"response\"])\n", - " print(f\"\\n๐Ÿ“Š Search completed in {result['search_time']:.2f} seconds\")\n", - " print(f\"๐Ÿ“š Found {result['num_sources']} relevant sources\")\n", - " \n", - " if show_sources and result[\"sources\"]:\n", - " print(f\"\\n๐Ÿ“– Source Details:\")\n", - " print(\"-\" * 40)\n", - " for i, source in enumerate(result[\"sources\"], 1):\n", - " print(f\"\\n{i}. Score: {source.get('score', 0):.3f}\")\n", - " print(f\" Text: {source['text'][:200]}...\")\n", - " \n", - " else:\n", - " print(f\"โŒ Error: {result['error']}\")\n", - "\n", - "# Example usage - you can modify this question\n", - "custom_question = \"What are the key ethical considerations for AI agents?\"\n", - "\n", - "if query_engine:\n", - " ask_question(query_engine, custom_question, show_sources=True)\n", - "else:\n", - " print(\"โŒ Query engine not available\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 13. 
System Performance and Statistics\n", - "\n", - "Let's create functions to analyze and display performance statistics of our RAG system.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“Š RAG System Statistics\n", - "==================================================\n", - "๐Ÿ“š Document Statistics:\n", - " Total Papers: 229\n", - " Total Pages: 0\n", - " Total Size: 1962.34 MB\n", - " Average Pages per Paper: 0.0\n", - "\n", - "๐Ÿ—‚๏ธ Index Statistics:\n", - " Index Type: Vector Store Index\n", - " Embedding Model: text-embedding-3-small\n", - " LLM Model: gpt-4o-mini\n", - "\n", - "๐Ÿ’พ Storage Locations:\n", - " Papers Folder: papers/agents\n", - " Vector Database: storage/papers_vectordb\n", - " Index Storage: storage/papers_index\n", - "\n", - "๐Ÿ“‹ Paper Titles:\n", - " 1. AI_Agents_vs_Agentic_AI\n", - " 2. AI_Agents_vs_Agentic_AI\n", - " 3. AI_Agents_vs_Agentic_AI\n", - " 4. AI_Agents_vs_Agentic_AI\n", - " 5. AI_Agents_vs_Agentic_AI\n", - " 6. AI_Agents_vs_Agentic_AI\n", - " 7. AI_Agents_vs_Agentic_AI\n", - " 8. AI_Agents_vs_Agentic_AI\n", - " 9. AI_Agents_vs_Agentic_AI\n", - " 10. AI_Agents_vs_Agentic_AI\n", - " 11. AI_Agents_vs_Agentic_AI\n", - " 12. AI_Agents_vs_Agentic_AI\n", - " 13. AI_Agents_vs_Agentic_AI\n", - " 14. AI_Agents_vs_Agentic_AI\n", - " 15. AI_Agents_vs_Agentic_AI\n", - " 16. AI_Agents_vs_Agentic_AI\n", - " 17. AI_Agents_vs_Agentic_AI\n", - " 18. AI_Agents_vs_Agentic_AI\n", - " 19. AI_Agents_vs_Agentic_AI\n", - " 20. AI_Agents_vs_Agentic_AI\n", - " 21. AI_Agents_vs_Agentic_AI\n", - " 22. AI_Agents_vs_Agentic_AI\n", - " 23. AI_Agents_vs_Agentic_AI\n", - " 24. AI_Agents_vs_Agentic_AI\n", - " 25. AI_Agents_vs_Agentic_AI\n", - " 26. AI_Agents_vs_Agentic_AI\n", - " 27. AI_Agents_vs_Agentic_AI\n", - " 28. AI_Agents_vs_Agentic_AI\n", - " 29. AI_Agents_vs_Agentic_AI\n", - " 30. AI_Agents_vs_Agentic_AI\n", - " 31. 
AI_Agents_vs_Agentic_AI\n", - " 32. AI_Agents_vs_Agentic_AI\n", - " 33. Emerging_Agent_Architectures\n", - " 34. Emerging_Agent_Architectures\n", - " 35. Emerging_Agent_Architectures\n", - " 36. Emerging_Agent_Architectures\n", - " 37. Emerging_Agent_Architectures\n", - " 38. Emerging_Agent_Architectures\n", - " 39. Emerging_Agent_Architectures\n", - " 40. Emerging_Agent_Architectures\n", - " 41. Emerging_Agent_Architectures\n", - " 42. Emerging_Agent_Architectures\n", - " 43. Emerging_Agent_Architectures\n", - " 44. Emerging_Agent_Architectures\n", - " 45. Emerging_Agent_Architectures\n", - " 46. LLMReasoning_to_Autonomous_Agents\n", - " 47. LLMReasoning_to_Autonomous_Agents\n", - " 48. LLMReasoning_to_Autonomous_Agents\n", - " 49. LLMReasoning_to_Autonomous_Agents\n", - " 50. LLMReasoning_to_Autonomous_Agents\n", - " 51. LLMReasoning_to_Autonomous_Agents\n", - " 52. LLMReasoning_to_Autonomous_Agents\n", - " 53. LLMReasoning_to_Autonomous_Agents\n", - " 54. LLMReasoning_to_Autonomous_Agents\n", - " 55. LLMReasoning_to_Autonomous_Agents\n", - " 56. LLMReasoning_to_Autonomous_Agents\n", - " 57. LLMReasoning_to_Autonomous_Agents\n", - " 58. LLMReasoning_to_Autonomous_Agents\n", - " 59. LLMReasoning_to_Autonomous_Agents\n", - " 60. LLMReasoning_to_Autonomous_Agents\n", - " 61. LLMReasoning_to_Autonomous_Agents\n", - " 62. LLMReasoning_to_Autonomous_Agents\n", - " 63. LLMReasoning_to_Autonomous_Agents\n", - " 64. LLMReasoning_to_Autonomous_Agents\n", - " 65. LLMReasoning_to_Autonomous_Agents\n", - " 66. LLMReasoning_to_Autonomous_Agents\n", - " 67. LLMReasoning_to_Autonomous_Agents\n", - " 68. LLMReasoning_to_Autonomous_Agents\n", - " 69. LLMReasoning_to_Autonomous_Agents\n", - " 70. LLMReasoning_to_Autonomous_Agents\n", - " 71. LLMReasoning_to_Autonomous_Agents\n", - " 72. LLMReasoning_to_Autonomous_Agents\n", - " 73. LLMReasoning_to_Autonomous_Agents\n", - " 74. LLMReasoning_to_Autonomous_Agents\n", - " 75. LLMReasoning_to_Autonomous_Agents\n", - " 76. 
LLMReasoning_to_Autonomous_Agents\n", - " 77. LLMReasoning_to_Autonomous_Agents\n", - " 78. LLMReasoning_to_Autonomous_Agents\n", - " 79. LLMReasoning_to_Autonomous_Agents\n", - " 80. LLMReasoning_to_Autonomous_Agents\n", - " 81. LLMReasoning_to_Autonomous_Agents\n", - " 82. LLMReasoning_to_Autonomous_Agents\n", - " 83. LLMReasoning_to_Autonomous_Agents\n", - " 84. LLMReasoning_to_Autonomous_Agents\n", - " 85. LLMReasoning_to_Autonomous_Agents\n", - " 86. LLMReasoning_to_Autonomous_Agents\n", - " 87. LLMReasoning_to_Autonomous_Agents\n", - " 88. LLMReasoning_to_Autonomous_Agents\n", - " 89. LLMReasoning_to_Autonomous_Agents\n", - " 90. Rise_and_Potential_LLM_Agents\n", - " 91. Rise_and_Potential_LLM_Agents\n", - " 92. Rise_and_Potential_LLM_Agents\n", - " 93. Rise_and_Potential_LLM_Agents\n", - " 94. Rise_and_Potential_LLM_Agents\n", - " 95. Rise_and_Potential_LLM_Agents\n", - " 96. Rise_and_Potential_LLM_Agents\n", - " 97. Rise_and_Potential_LLM_Agents\n", - " 98. Rise_and_Potential_LLM_Agents\n", - " 99. Rise_and_Potential_LLM_Agents\n", - " 100. Rise_and_Potential_LLM_Agents\n", - " 101. Rise_and_Potential_LLM_Agents\n", - " 102. Rise_and_Potential_LLM_Agents\n", - " 103. Rise_and_Potential_LLM_Agents\n", - " 104. Rise_and_Potential_LLM_Agents\n", - " 105. Rise_and_Potential_LLM_Agents\n", - " 106. Rise_and_Potential_LLM_Agents\n", - " 107. Rise_and_Potential_LLM_Agents\n", - " 108. Rise_and_Potential_LLM_Agents\n", - " 109. Rise_and_Potential_LLM_Agents\n", - " 110. Rise_and_Potential_LLM_Agents\n", - " 111. Rise_and_Potential_LLM_Agents\n", - " 112. Rise_and_Potential_LLM_Agents\n", - " 113. Rise_and_Potential_LLM_Agents\n", - " 114. Rise_and_Potential_LLM_Agents\n", - " 115. Rise_and_Potential_LLM_Agents\n", - " 116. Rise_and_Potential_LLM_Agents\n", - " 117. Rise_and_Potential_LLM_Agents\n", - " 118. Rise_and_Potential_LLM_Agents\n", - " 119. Rise_and_Potential_LLM_Agents\n", - " 120. Rise_and_Potential_LLM_Agents\n", - " 121. 
Rise_and_Potential_LLM_Agents\n", - " 122. Rise_and_Potential_LLM_Agents\n", - " 123. Rise_and_Potential_LLM_Agents\n", - " 124. Rise_and_Potential_LLM_Agents\n", - " 125. Rise_and_Potential_LLM_Agents\n", - " 126. Rise_and_Potential_LLM_Agents\n", - " 127. Rise_and_Potential_LLM_Agents\n", - " 128. Rise_and_Potential_LLM_Agents\n", - " 129. Rise_and_Potential_LLM_Agents\n", - " 130. Rise_and_Potential_LLM_Agents\n", - " 131. Rise_and_Potential_LLM_Agents\n", - " 132. Rise_and_Potential_LLM_Agents\n", - " 133. Rise_and_Potential_LLM_Agents\n", - " 134. Rise_and_Potential_LLM_Agents\n", - " 135. Rise_and_Potential_LLM_Agents\n", - " 136. Rise_and_Potential_LLM_Agents\n", - " 137. Rise_and_Potential_LLM_Agents\n", - " 138. Rise_and_Potential_LLM_Agents\n", - " 139. Rise_and_Potential_LLM_Agents\n", - " 140. Rise_and_Potential_LLM_Agents\n", - " 141. Rise_and_Potential_LLM_Agents\n", - " 142. Rise_and_Potential_LLM_Agents\n", - " 143. Rise_and_Potential_LLM_Agents\n", - " 144. Rise_and_Potential_LLM_Agents\n", - " 145. Rise_and_Potential_LLM_Agents\n", - " 146. Rise_and_Potential_LLM_Agents\n", - " 147. Rise_and_Potential_LLM_Agents\n", - " 148. Rise_and_Potential_LLM_Agents\n", - " 149. Rise_and_Potential_LLM_Agents\n", - " 150. Rise_and_Potential_LLM_Agents\n", - " 151. Rise_and_Potential_LLM_Agents\n", - " 152. Rise_and_Potential_LLM_Agents\n", - " 153. Rise_and_Potential_LLM_Agents\n", - " 154. Rise_and_Potential_LLM_Agents\n", - " 155. Rise_and_Potential_LLM_Agents\n", - " 156. Rise_and_Potential_LLM_Agents\n", - " 157. Rise_and_Potential_LLM_Agents\n", - " 158. Rise_and_Potential_LLM_Agents\n", - " 159. Rise_and_Potential_LLM_Agents\n", - " 160. Rise_and_Potential_LLM_Agents\n", - " 161. Rise_and_Potential_LLM_Agents\n", - " 162. Rise_and_Potential_LLM_Agents\n", - " 163. Rise_and_Potential_LLM_Agents\n", - " 164. Rise_and_Potential_LLM_Agents\n", - " 165. Rise_and_Potential_LLM_Agents\n", - " 166. Rise_and_Potential_LLM_Agents\n", - " 167. 
Rise_and_Potential_LLM_Agents\n", - " 168. Rise_and_Potential_LLM_Agents\n", - " 169. Rise_and_Potential_LLM_Agents\n", - " 170. Rise_and_Potential_LLM_Agents\n", - " 171. Rise_and_Potential_LLM_Agents\n", - " 172. Rise_and_Potential_LLM_Agents\n", - " 173. Rise_and_Potential_LLM_Agents\n", - " 174. Rise_and_Potential_LLM_Agents\n", - " 175. Rise_and_Potential_LLM_Agents\n", - " 176. survey_of_self_evolving_agents\n", - " 177. survey_of_self_evolving_agents\n", - " 178. survey_of_self_evolving_agents\n", - " 179. survey_of_self_evolving_agents\n", - " 180. survey_of_self_evolving_agents\n", - " 181. survey_of_self_evolving_agents\n", - " 182. survey_of_self_evolving_agents\n", - " 183. survey_of_self_evolving_agents\n", - " 184. survey_of_self_evolving_agents\n", - " 185. survey_of_self_evolving_agents\n", - " 186. survey_of_self_evolving_agents\n", - " 187. survey_of_self_evolving_agents\n", - " 188. survey_of_self_evolving_agents\n", - " 189. survey_of_self_evolving_agents\n", - " 190. survey_of_self_evolving_agents\n", - " 191. survey_of_self_evolving_agents\n", - " 192. survey_of_self_evolving_agents\n", - " 193. survey_of_self_evolving_agents\n", - " 194. survey_of_self_evolving_agents\n", - " 195. survey_of_self_evolving_agents\n", - " 196. survey_of_self_evolving_agents\n", - " 197. survey_of_self_evolving_agents\n", - " 198. survey_of_self_evolving_agents\n", - " 199. survey_of_self_evolving_agents\n", - " 200. survey_of_self_evolving_agents\n", - " 201. survey_of_self_evolving_agents\n", - " 202. survey_of_self_evolving_agents\n", - " 203. survey_of_self_evolving_agents\n", - " 204. survey_of_self_evolving_agents\n", - " 205. survey_of_self_evolving_agents\n", - " 206. survey_of_self_evolving_agents\n", - " 207. survey_of_self_evolving_agents\n", - " 208. survey_of_self_evolving_agents\n", - " 209. survey_of_self_evolving_agents\n", - " 210. survey_of_self_evolving_agents\n", - " 211. survey_of_self_evolving_agents\n", - " 212. 
survey_of_self_evolving_agents\n", - " 213. survey_of_self_evolving_agents\n", - " 214. survey_of_self_evolving_agents\n", - " 215. survey_of_self_evolving_agents\n", - " 216. survey_of_self_evolving_agents\n", - " 217. survey_of_self_evolving_agents\n", - " 218. survey_of_self_evolving_agents\n", - " 219. survey_of_self_evolving_agents\n", - " 220. survey_of_self_evolving_agents\n", - " 221. survey_of_self_evolving_agents\n", - " 222. survey_of_self_evolving_agents\n", - " 223. survey_of_self_evolving_agents\n", - " 224. survey_of_self_evolving_agents\n", - " 225. survey_of_self_evolving_agents\n", - " 226. survey_of_self_evolving_agents\n", - " 227. survey_of_self_evolving_agents\n", - " 228. survey_of_self_evolving_agents\n", - " 229. survey_of_self_evolving_agents\n", - "\n", - "โœ… RAG System Analysis Complete!\n" - ] - } - ], - "source": [ - "def display_system_stats(papers_list, vector_store, index):\n", - " \"\"\"\n", - " Display comprehensive statistics about the RAG system.\n", - " \n", - " Args:\n", - " papers_list: List of indexed papers\n", - " vector_store: The vector store instance\n", - " index: The vector index\n", - " \"\"\"\n", - " print(\"๐Ÿ“Š RAG System Statistics\")\n", - " print(\"=\" * 50)\n", - " \n", - " # Paper statistics\n", - " total_papers = len(papers_list)\n", - " total_pages = sum(paper.get('page_count', 0) for paper in papers_list)\n", - " total_size_mb = sum(paper.get('file_size_mb', 0) for paper in papers_list)\n", - " \n", - " print(f\"๐Ÿ“š Document Statistics:\")\n", - " print(f\" Total Papers: {total_papers}\")\n", - " print(f\" Total Pages: {total_pages}\")\n", - " print(f\" Total Size: {total_size_mb:.2f} MB\")\n", - " print(f\" Average Pages per Paper: {total_pages/total_papers:.1f}\" if total_papers > 0 else \" Average Pages: N/A\")\n", - " \n", - " # Index statistics\n", - " if index:\n", - " print(f\"\\n๐Ÿ—‚๏ธ Index Statistics:\")\n", - " print(f\" Index Type: Vector Store Index\")\n", - " print(f\" Embedding Model: 
{get_config('api.openai.embedding_model', 'text-embedding-3-small')}\")\n", - " print(f\" LLM Model: {get_config('api.openai.model', 'gpt-4o-mini')}\")\n", - " \n", - " # Storage paths\n", - " print(f\"\\n๐Ÿ’พ Storage Locations:\")\n", - " print(f\" Papers Folder: papers/agents\")\n", - " print(f\" Vector Database: storage/papers_vectordb\")\n", - " print(f\" Index Storage: storage/papers_index\")\n", - " \n", - " # Recent papers by modification time\n", - " if papers_list:\n", - " print(f\"\\n๐Ÿ“‹ Paper Titles:\")\n", - " for i, paper in enumerate(papers_list, 1):\n", - " title = paper['title']\n", - " if len(title) > 50:\n", - " title = title[:47] + \"...\"\n", - " print(f\" {i}. {title}\")\n", - "\n", - "# Display system statistics\n", - "display_system_stats(papers_list, vector_store, index)\n", - "print(\"\\nโœ… RAG System Analysis Complete!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conclusion\n", - "\n", - "๐ŸŽ‰ **Congratulations!** You have successfully built a complete RAG (Retrieval-Augmented Generation) system for academic papers using LlamaIndex.\n", - "\n", - "### What we accomplished:\n", - "\n", - "1. **Environment Setup**: Configured API keys and dependencies\n", - "2. **Configuration Management**: Loaded system settings from YAML files\n", - "3. **LlamaIndex Configuration**: Set up embeddings, LLM, and text processing\n", - "4. **Vector Store**: Created a LanceDB vector database for storing embeddings\n", - "5. **Document Processing**: Loaded and processed PDF academic papers\n", - "6. **Vector Indexing**: Created searchable vector embeddings of documents\n", - "7. **Query Engine**: Set up retrieval and response generation\n", - "8. **Search Functions**: Implemented semantic search with metadata\n", - "9. **Paper Analysis**: Created functions for listing and summarizing papers\n", - "10. 
**Interactive Queries**: Built an interface for asking custom questions\n", - "11. **Performance Analytics**: Added system statistics and monitoring\n", - "\n", - "### Key Features:\n", - "\n", - "- **Semantic Search**: Find relevant content using natural language queries\n", - "- **Source Attribution**: Get detailed citations and references for answers\n", - "- **Metadata Integration**: Access paper titles, authors, and other metadata\n", - "- **Performance Monitoring**: Track search times and system statistics\n", - "- **Flexible Configuration**: Easy to modify models, chunk sizes, and parameters\n", - "\n", - "### Next Steps:\n", - "\n", - "1. **Experiment** with different queries to explore your document collection\n", - "2. **Modify** the `custom_question` variable to ask your own questions\n", - "3. **Adjust** parameters like `chunk_size`, `similarity_top_k` for different results\n", - "4. **Add** more papers to the `papers/agents` folder and rebuild the index\n", - "5. **Enhance** the system with additional features like filtering or ranking\n", - "\n", - "### Usage Tips:\n", - "\n", - "- Use specific, focused questions for better results\n", - "- Try different phrasings of the same question\n", - "- Check the source information to understand where answers come from\n", - "- Experiment with the `similarity_top_k` parameter to get more or fewer sources\n", - "\n", - "Happy researching! 
๐Ÿ”ฌ๐Ÿ“š\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "accelerator", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.13" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/02_multimodal_rag_system.ipynb b/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/02_multimodal_rag_system.ipynb deleted file mode 100644 index 866ac95..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/02_multimodal_rag_system.ipynb +++ /dev/null @@ -1,1360 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Multimodal RAG System Tutorial\n", - "\n", - "This notebook extends our basic RAG system to handle multiple data types including PDFs, CSV files, JSON, Markdown, HTML, images, and audio files. 
We'll demonstrate the advanced capabilities of LlamaIndex's `SimpleDirectoryReader` for multimodal data processing.\n", - "\n", - "## What's New in This Tutorial\n", - "\n", - "Building upon our previous RAG system, we now add:\n", - "- **Multimodal Document Loading**: CSV, JSON, Markdown, HTML, Images, Audio\n", - "- **Advanced SimpleDirectoryReader Features**: File filtering, metadata extraction, custom processors\n", - "- **Cross-Modal Queries**: Search across different data types simultaneously\n", - "- **Structured Data Integration**: Combine tabular data with unstructured text\n", - "- **Visual Content Processing**: Extract information from images and charts\n", - "\n", - "## Supported File Types (Per LlamaIndex Documentation)\n", - "\n", - "According to the [SimpleDirectoryReader documentation](https://developers.llamaindex.ai/python/framework/module_guides/loading/simpledirectoryreader/), the following formats are automatically supported:\n", - "\n", - "- **.csv** - comma-separated values\n", - "- **.docx** - Microsoft Word \n", - "- **.epub** - EPUB ebook format\n", - "- **.hwp** - Hangul Word Processor\n", - "- **.ipynb** - Jupyter Notebook\n", - "- **.jpeg, .jpg** - JPEG image\n", - "- **.mbox** - MBOX email archive\n", - "- **.md** - Markdown\n", - "- **.mp3, .mp4** - audio and video\n", - "- **.pdf** - Portable Document Format\n", - "- **.png** - Portable Network Graphics\n", - "- **.ppt, .pptm, .pptx** - Microsoft PowerPoint\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. Environment Setup and Configuration\n", - "\n", - "First, let's set up our environment with hardcoded configurations. 
We'll use OpenRouter for the LLM and local embeddings for cost-effective processing.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install -r \"../requirements.txt\"" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Environment variables loaded successfully\n", - "โœ“ LLM Model: gpt-5-mini\n", - "โœ“ Embedding Model: local:BAAI/bge-small-en-v1.5\n", - "Environment setup complete!\n" - ] - } - ], - "source": [ - "# Environment setup with hardcoded configurations\n", - "import os\n", - "import time\n", - "from pathlib import Path\n", - "from typing import Dict, List, Optional, Tuple\n", - "import pandas as pd\n", - "import json\n", - "\n", - "from dotenv import load_dotenv\n", - "\n", - "# Hardcoded configuration\n", - "CONFIG = {\n", - " \"llm_model\": \"gpt-5-mini\",\n", - " \"embedding_model\": \"local:BAAI/bge-small-en-v1.5\",\n", - " \"chunk_size\": 1024,\n", - " \"chunk_overlap\": 100,\n", - " \"similarity_top_k\": 5,\n", - " \"data_path\": \"../data\",\n", - " \"vector_db_path\": \"storage/multimodal_vectordb\",\n", - " \"index_storage_path\": \"storage/multimodal_index\"\n", - "}\n", - "\n", - "def setup_environment():\n", - " \"\"\"\n", - " Setup environment variables and basic configuration.\n", - " \n", - " Returns:\n", - " bool: Success status\n", - " \"\"\"\n", - " # Load environment variables from .env file\n", - " load_dotenv()\n", - " \n", - " # Disable tokenizer warning\n", - " os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n", - " \n", - " # Check for required API key\n", - " api_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " if not api_key:\n", - " print(\"โš ๏ธ OPENROUTER_API_KEY not found in environment variables\")\n", - " print(\"Please add your OpenRouter API key to a .env file\")\n", - " return False\n", - " \n", - " print(\"โœ“ Environment 
variables loaded successfully\")\n", - " print(f\"โœ“ LLM Model: {CONFIG['llm_model']}\")\n", - " print(f\"โœ“ Embedding Model: {CONFIG['embedding_model']}\")\n", - " return True\n", - "\n", - "# Run the setup\n", - "success = setup_environment()\n", - "if success:\n", - " print(\"Environment setup complete!\")\n", - "else:\n", - " print(\"Environment setup failed!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. LlamaIndex Configuration for Multimodal Data\n", - "\n", - "Let's configure LlamaIndex with our hardcoded settings for OpenRouter LLM and local embeddings.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐Ÿ”„ Multimodal vs. Unimodal Vector Index Creation\n", - "\n", - "### Understanding the Key Difference\n", - "\n", - "While **multimodal** and **unimodal** RAG systems use the same underlying `VectorStoreIndex.from_documents()` method, there's a **critical difference** in how existing indexes are loaded that affects system behavior and reliability.\n", - "\n", - "### ๐Ÿ“Š Index Creation Comparison\n", - "\n", - "| Aspect | Unimodal (Academic Papers) | Multimodal (This Notebook) | Impact |\n", - "|--------|----------------------------|----------------------------|---------|\n", - "| **Document Types** | Single type (PDF papers) | Multiple types (PDF, CSV, HTML, Images, Audio) | Different processing pipelines |\n", - "| **Index Creation** | `VectorStoreIndex.from_documents()` | `VectorStoreIndex.from_documents()` | **Identical** |\n", - "| **Storage Context** | โœ… Full StorageContext persistence | โœ… Full StorageContext persistence | **Identical** |\n", - "| **Index Loading** | `load_index_from_storage()` | `VectorStoreIndex.from_vector_store()` | **โš ๏ธ Different!** |\n", - "| **Metadata Complexity** | Single file type metadata | Rich cross-modal metadata | More complex relationships |\n", - "\n", - "### ๐Ÿ” The Critical Loading Difference\n", - "\n", - "**Unimodal Loading (Academic 
Papers):**\n", - "```python\n", - "# ROBUST: Complete index reconstruction\n", - "storage_context = StorageContext.from_defaults(\n", - " persist_dir=str(index_path), \n", - " vector_store=vector_store\n", - ")\n", - "index = load_index_from_storage(storage_context)\n", - "# โœ… Perfect restoration with all metadata and relationships\n", - "```\n", - "\n", - "**Multimodal Loading (This Notebook):**\n", - "```python\n", - "# BASIC: Vector-only reconstruction\n", - "storage_context = StorageContext.from_defaults(\n", - " persist_dir=str(index_path), \n", - " vector_store=vector_store\n", - ")\n", - "index = VectorStoreIndex.from_vector_store(\n", - " vector_store=vector_store,\n", - " storage_context=storage_context\n", - ")\n", - "# โš ๏ธ May lose some complex relationships between file types\n", - "```\n", - "\n", - "### ๐ŸŽฏ Why This Difference Matters\n", - "\n", - "**For Unimodal Systems:**\n", - "- Documents are homogeneous (all PDFs)\n", - "- `load_index_from_storage()` ensures perfect reconstruction\n", - "- Critical for academic reproducibility\n", - "\n", - "**For Multimodal Systems:**\n", - "- Documents are heterogeneous (PDFs, images, audio, CSV)\n", - "- `from_vector_store()` focuses on vector similarity\n", - "- Cross-modal relationships handled differently\n", - "- May prioritize performance over perfect metadata preservation\n", - "\n", - "### ๐Ÿ“ˆ Practical Implications\n", - "\n", - "| Scenario | Unimodal Advantage | Multimodal Trade-off |\n", - "|----------|-------------------|---------------------|\n", - "| **Research Reproducibility** | ๐ŸŽฏ Identical results every time | โš ๏ธ Minor variations possible |\n", - "| **Cross-Modal Queries** | โŒ Not applicable | โœ… Query across file types |\n", - "| **System Startup** | โšก Fastest (complete restoration) | ๐Ÿ”„ Fast (vector-based loading) |\n", - "| **Metadata Fidelity** | ๐Ÿ”’ 100% preserved | ๐Ÿ“Š Core metadata preserved |\n", - "| **File Type Diversity** | ๐Ÿ“„ Single type (PDFs) | ๐ŸŒˆ 
Multiple types supported |\n", - "\n", - "### ๐Ÿ› ๏ธ When to Use Each Approach\n", - "\n", - "**Choose Unimodal (`load_index_from_storage`) When:**\n", - "- Working with homogeneous document types\n", - "- Perfect reproducibility is critical\n", - "- Academic research or compliance requirements\n", - "- Complex document relationships matter\n", - "\n", - "**Choose Multimodal (`from_vector_store`) When:**\n", - "- Processing diverse file types simultaneously\n", - "- Cross-modal search is the priority\n", - "- Performance over perfect metadata preservation\n", - "- Building versatile content search systems\n", - "\n", - "### ๐ŸŽจ The Multimodal Advantage\n", - "\n", - "Despite the loading difference, multimodal indexing provides unique capabilities:\n", - "\n", - "1. **๐Ÿ” Cross-Modal Search**: Find information across PDFs, images, and data files\n", - "2. **๐Ÿ“Š Rich Content Types**: Handle structured (CSV) and unstructured (text) data together \n", - "3. **๐ŸŽต Audio Integration**: Include transcribed audio content in searches\n", - "4. **๐Ÿ–ผ๏ธ Visual Content**: Extract information from charts and diagrams\n", - "5. 
**๐Ÿ“ˆ Unified Knowledge Base**: Single search across all organizational content\n", - "\n", - "### ๐Ÿ’ก Best Practice Recommendation\n", - "\n", - "For production multimodal systems, consider implementing **hybrid loading**:\n", - "\n", - "```python\n", - "# Try robust loading first, fallback to vector-only\n", - "try:\n", - " index = load_index_from_storage(storage_context) # Full restoration\n", - " print(\"โœ“ Complete index restoration\")\n", - "except:\n", - " index = VectorStoreIndex.from_vector_store(vector_store) # Vector fallback\n", - " print(\"โœ“ Vector-based index loading\")\n", - "```\n", - "\n", - "This gives you the reliability of unimodal loading with the flexibility of multimodal processing.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ LLM configured: gpt-5-mini\n", - "โœ“ Embedding model configured: local:BAAI/bge-small-en-v1.5\n", - "โœ“ Text chunking configured: 1024 chars with 100 overlap\n", - "โœ“ LlamaIndex settings configured for multimodal processing\n" - ] - } - ], - "source": [ - "# LlamaIndex configuration with hardcoded settings\n", - "from llama_index.core import Settings\n", - "from llama_index.llms.openrouter import OpenRouter\n", - "from llama_index.core.embeddings import resolve_embed_model\n", - "from llama_index.core.node_parser import SentenceSplitter\n", - "\n", - "def configure_llamaindex_settings():\n", - " \"\"\"Configure LlamaIndex global settings using hardcoded configuration.\"\"\"\n", - " \n", - " # Set up LLM with OpenRouter using hardcoded model\n", - " Settings.llm = OpenRouter(\n", - " api_key=os.getenv(\"OPENROUTER_API_KEY\"),\n", - " model=CONFIG[\"llm_model\"]\n", - " )\n", - " print(f\"โœ“ LLM configured: {CONFIG['llm_model']}\")\n", - "\n", - " # Set up local embedding model (downloads locally first time, then cached)\n", - " Settings.embed_model = 
resolve_embed_model(CONFIG[\"embedding_model\"])\n", - " print(f\"โœ“ Embedding model configured: {CONFIG['embedding_model']}\")\n", - "\n", - " # Set up node parser for chunking with hardcoded settings\n", - " Settings.node_parser = SentenceSplitter(\n", - " chunk_size=CONFIG[\"chunk_size\"], \n", - " chunk_overlap=CONFIG[\"chunk_overlap\"]\n", - " )\n", - " print(f\"โœ“ Text chunking configured: {CONFIG['chunk_size']} chars with {CONFIG['chunk_overlap']} overlap\")\n", - "\n", - "# Configure the settings\n", - "configure_llamaindex_settings()\n", - "print(\"โœ“ LlamaIndex settings configured for multimodal processing\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Exploring Our Multimodal Dataset\n", - "\n", - "Let's examine the different types of files we have available for processing. This will show the diversity of data types that SimpleDirectoryReader can handle.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ—‚๏ธ Dataset Overview\n", - "==================================================\n", - "Total files found: 21\n", - "\n", - "๐Ÿ“ File Types Distribution:\n", - " .csv: 4 files (0.00 MB)\n", - " - italian_recipes.csv (0.0 MB)\n", - " - agent_performance_benchmark.csv (0.0 MB)\n", - " - agent_evaluation_metrics.csv (0.0 MB)\n", - " ... and 1 more\n", - "\n", - " .html: 2 files (0.00 MB)\n", - " - fitness_tracker.html (0.0 MB)\n", - " - agent_tutorial.html (0.0 MB)\n", - "\n", - " .md: 4 files (0.00 MB)\n", - " - recipe_instructions.md (0.0 MB)\n", - " - agent_framework_comparison.md (0.0 MB)\n", - " - market_analysis.md (0.0 MB)\n", - " ... 
and 1 more\n", - "\n", - " .mp3: 3 files (2.95 MB)\n", - " - rags.mp3 (0.81 MB)\n", - " - ai_agents.mp3 (1.54 MB)\n", - " - in_the_end.mp3 (0.6 MB)\n", - "\n", - " .pdf: 2 files (1.92 MB)\n", - " - AI_Agent_Frameworks.pdf (0.34 MB)\n", - " - Emerging_Agent_Architectures.pdf (1.58 MB)\n", - "\n", - " .png: 6 files (0.55 MB)\n", - " - recipe_popularity.png (0.04 MB)\n", - " - agent_types_comparison.png (0.1 MB)\n", - " - agent_performance_comparison.png (0.17 MB)\n", - " ... and 3 more\n", - "\n", - "โœ“ Found 21 files across 6 different file types\n" - ] - } - ], - "source": [ - "def explore_dataset(data_path: str = None):\n", - " \"\"\"\n", - " Explore and categorize the files in our dataset by type.\n", - " \n", - " Args:\n", - " data_path (str): Path to the data directory\n", - " \"\"\"\n", - " if data_path is None:\n", - " data_path = CONFIG[\"data_path\"]\n", - " \n", - " data_dir = Path(data_path)\n", - " if not data_dir.exists():\n", - " print(f\"Data directory not found: {data_dir}\")\n", - " return\n", - " \n", - " # Categorize files by type\n", - " file_types = {}\n", - " all_files = []\n", - " \n", - " # Walk through all files recursively\n", - " for file_path in data_dir.rglob(\"*\"):\n", - " if file_path.is_file():\n", - " suffix = file_path.suffix.lower()\n", - " file_size = file_path.stat().st_size\n", - " \n", - " if suffix not in file_types:\n", - " file_types[suffix] = []\n", - " \n", - " file_info = {\n", - " \"path\": str(file_path),\n", - " \"name\": file_path.name,\n", - " \"size_mb\": round(file_size / (1024 * 1024), 2),\n", - " \"size_bytes\": file_size\n", - " }\n", - " \n", - " file_types[suffix].append(file_info)\n", - " all_files.append(file_info)\n", - " \n", - " # Display summary\n", - " print(\"๐Ÿ—‚๏ธ Dataset Overview\")\n", - " print(\"=\" * 50)\n", - " print(f\"Total files found: {len(all_files)}\")\n", - " \n", - " print(f\"\\n๐Ÿ“ File Types Distribution:\")\n", - " for file_type, files in sorted(file_types.items()):\n", - " if 
file_type: # Skip files without extension\n", - " total_size = sum(f[\"size_mb\"] for f in files)\n", - " print(f\" {file_type}: {len(files)} files ({total_size:.2f} MB)\")\n", - " \n", - " # Show file details\n", - " for file_info in files[:3]: # Show first 3 files of each type\n", - " print(f\" - {file_info['name']} ({file_info['size_mb']} MB)\")\n", - " if len(files) > 3:\n", - " print(f\" ... and {len(files) - 3} more\")\n", - " \n", - " print()\n", - " \n", - " return file_types, all_files\n", - "\n", - "# Explore our dataset\n", - "file_types, all_files = explore_dataset()\n", - "print(f\"โœ“ Found {len(all_files)} files across {len(file_types)} different file types\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. Basic Multimodal Document Loading\n", - "\n", - "Now let's use SimpleDirectoryReader to load all files from our data directory. This demonstrates the core multimodal capability.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ๐Ÿ” Index Creation Implementation Note\n", - "\n", - "The following implementation uses **multimodal-optimized loading** - notice the difference from the academic papers notebook:\n", - "\n", - "#### Key Implementation Differences:\n", - "\n", - "1. **Index Loading Method**: Uses `VectorStoreIndex.from_vector_store()` instead of `load_index_from_storage()`\n", - "2. **Reasoning**: Optimized for cross-modal search performance over perfect metadata preservation \n", - "3. **Trade-off**: Slightly less metadata fidelity but better handling of diverse file types\n", - "4. 
**Benefit**: More flexible loading for heterogeneous document collections\n", - "\n", - "This approach prioritizes the core multimodal capability while maintaining good performance and reliability.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“‚ Loading multimodal documents from: ../data\n", - "๐Ÿ”„ Processing files...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/ishandutta/miniconda3/envs/accelerator/lib/python3.11/site-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n", - "/Users/ishandutta/miniconda3/envs/accelerator/lib/python3.11/site-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n", - "/Users/ishandutta/miniconda3/envs/accelerator/lib/python3.11/site-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Successfully loaded 42 documents in 14.85 seconds\n", - "\n", - "๐Ÿ“Š Documents by MIME type:\n", - " application/pdf: 23 documents\n", - " audio/mpeg: 3 documents\n", - " image/png: 6 documents\n", - " text/csv: 4 documents\n", - " text/html: 2 documents\n", - " unknown: 4 documents\n", - "\n", - "๐Ÿ“„ Sample Document Analysis:\n", - "File: AI_Agent_Frameworks.pdf\n", - "Type: application/pdf\n", - "Size: 360523 bytes\n", - "Text preview: A Comprehensive Survey of AI Agent Frameworks\n", - "and Their Applications in Financial Services\n", - "Satyadhar Joshi\n", - "Independent\n", - "Alumnus, International MBA, Bar-Ilan University, Israel\n", - 
"satyadhar.joshi@gmail.com...\n", - "Metadata keys: ['page_label', 'file_name', 'file_path', 'file_type', 'file_size', 'creation_date', 'last_modified_date']\n" - ] - } - ], - "source": [ - "from llama_index.core import SimpleDirectoryReader\n", - "\n", - "def load_multimodal_documents(data_path: str = None, recursive: bool = True):\n", - " \"\"\"\n", - " Load documents from multiple file types using SimpleDirectoryReader.\n", - " \n", - " Args:\n", - " data_path (str): Path to directory containing multimodal data\n", - " recursive (bool): Whether to search subdirectories\n", - " \n", - " Returns:\n", - " List of Document objects\n", - " \"\"\"\n", - " if data_path is None:\n", - " data_path = CONFIG[\"data_path\"]\n", - " \n", - " print(f\"๐Ÿ“‚ Loading multimodal documents from: {data_path}\")\n", - " \n", - " # Create SimpleDirectoryReader with recursive search\n", - " reader = SimpleDirectoryReader(\n", - " input_dir=data_path,\n", - " recursive=recursive,\n", - " # Let SimpleDirectoryReader handle all supported file types automatically\n", - " )\n", - " \n", - " print(\"๐Ÿ”„ Processing files...\")\n", - " start_time = time.time()\n", - " \n", - " # Load all documents\n", - " documents = reader.load_data()\n", - " \n", - " end_time = time.time()\n", - " \n", - " print(f\"โœ… Successfully loaded {len(documents)} documents in {end_time - start_time:.2f} seconds\")\n", - " \n", - " # Analyze loaded documents by file type\n", - " doc_types = {}\n", - " for doc in documents:\n", - " file_type = doc.metadata.get('file_type', 'unknown')\n", - " if file_type not in doc_types:\n", - " doc_types[file_type] = []\n", - " doc_types[file_type].append(doc)\n", - " \n", - " print(f\"\\n๐Ÿ“Š Documents by MIME type:\")\n", - " for mime_type, docs in sorted(doc_types.items()):\n", - " print(f\" {mime_type}: {len(docs)} documents\")\n", - " \n", - " return documents\n", - "\n", - "# Load all multimodal documents\n", - "documents = load_multimodal_documents()\n", - "\n", - "# Show 
sample document information\n", - "if documents:\n", - " print(f\"\\n๐Ÿ“„ Sample Document Analysis:\")\n", - " sample_doc = documents[0]\n", - " print(f\"File: {sample_doc.metadata.get('file_name', 'Unknown')}\")\n", - " print(f\"Type: {sample_doc.metadata.get('file_type', 'Unknown')}\")\n", - " print(f\"Size: {sample_doc.metadata.get('file_size', 0)} bytes\")\n", - " print(f\"Text preview: {sample_doc.text[:200]}...\")\n", - " print(f\"Metadata keys: {list(sample_doc.metadata.keys())}\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. Creating Multimodal Vector Index\n", - "\n", - "Now let's create a vector index that can handle our multimodal documents using LanceDB for efficient storage and retrieval.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:57:10,572 - WARNING - Table multimodal_documents doesn't exist yet. Please add some data to create it.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ Setting up multimodal vector storage...\n", - "โœ“ Connected to LanceDB at: storage/multimodal_vectordb\n", - "โœ“ LanceDB vector store created for multimodal data\n", - "๐Ÿ”จ Creating new multimodal vector index...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Parsing nodes: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 42/42 [00:00<00:00, 87.97it/s]\n", - "Generating embeddings: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 55/55 [00:02<00:00, 21.76it/s]\n", - "2025-09-20 12:57:13,598 - INFO - Create new table multimodal_documents adding data.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Multimodal index created in 3.07 seconds\n", - "๐Ÿ’พ Saving multimodal index to storage...\n", - "โœ“ Index saved successfully\n", - "โœ… Multimodal RAG system ready for cross-modal queries!\n" - ] - }, - { - "name": "stderr", - 
"output_type": "stream", - "text": [ - "\u001b[90m[\u001b[0m2025-09-20T07:27:13Z \u001b[33mWARN \u001b[0m lance::dataset::write::insert\u001b[90m]\u001b[0m No existing dataset at /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/storage/multimodal_vectordb/multimodal_documents.lance, it will be created\n" - ] - } - ], - "source": [ - "# Vector store and index creation\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "from llama_index.core import StorageContext, VectorStoreIndex\n", - "\n", - "def create_multimodal_vector_store(vector_db_path: str = None):\n", - " \"\"\"Create and configure LanceDB vector store for multimodal data.\"\"\"\n", - " if vector_db_path is None:\n", - " vector_db_path = CONFIG[\"vector_db_path\"]\n", - " \n", - " try:\n", - " import lancedb\n", - " \n", - " # Create storage directory\n", - " Path(vector_db_path).parent.mkdir(parents=True, exist_ok=True)\n", - " \n", - " # Connect to LanceDB\n", - " db = lancedb.connect(str(vector_db_path))\n", - " print(f\"โœ“ Connected to LanceDB at: {vector_db_path}\")\n", - " \n", - " # Create vector store\n", - " vector_store = LanceDBVectorStore(\n", - " uri=str(vector_db_path), \n", - " table_name=\"multimodal_documents\"\n", - " )\n", - " print(\"โœ“ LanceDB vector store created for multimodal data\")\n", - " \n", - " return vector_store\n", - " \n", - " except Exception as e:\n", - " print(f\"Error creating vector store: {e}\")\n", - " return None\n", - "\n", - "def create_multimodal_index(documents: List, \n", - " vector_store, \n", - " index_storage_path: str = None,\n", - " force_rebuild: bool = False):\n", - " \"\"\"Create or load a multimodal vector index.\"\"\"\n", - " \n", - " if index_storage_path is None:\n", - " index_storage_path = CONFIG[\"index_storage_path\"]\n", - " \n", - " index_path = Path(index_storage_path)\n", - " index_path.mkdir(parents=True, exist_ok=True)\n", - " \n", - " # Check if index already exists\n", - " 
index_store_file = index_path / \"index_store.json\"\n", - " \n", - " if not force_rebuild and index_store_file.exists():\n", - " print(\"๐Ÿ“ Loading existing multimodal index...\")\n", - " try:\n", - " storage_context = StorageContext.from_defaults(\n", - " persist_dir=str(index_path), \n", - " vector_store=vector_store\n", - " )\n", - " \n", - " index = VectorStoreIndex.from_vector_store(\n", - " vector_store=vector_store,\n", - " storage_context=storage_context\n", - " )\n", - " print(\"โœ“ Successfully loaded existing multimodal index\")\n", - " return index\n", - " \n", - " except Exception as e:\n", - " print(f\"โš ๏ธ Error loading existing index: {e}\")\n", - " print(\"Creating new index...\")\n", - " \n", - " if not documents:\n", - " print(\"โŒ No documents to index\")\n", - " return None\n", - " \n", - " print(\"๐Ÿ”จ Creating new multimodal vector index...\")\n", - " start_time = time.time()\n", - " \n", - " # Create storage context with vector store\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " \n", - " # Create index with progress bar\n", - " index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context, \n", - " show_progress=True\n", - " )\n", - " \n", - " end_time = time.time()\n", - " print(f\"โœ“ Multimodal index created in {end_time - start_time:.2f} seconds\")\n", - " \n", - " # Save index to storage\n", - " print(\"๐Ÿ’พ Saving multimodal index to storage...\")\n", - " index.storage_context.persist(persist_dir=str(index_path))\n", - " print(\"โœ“ Index saved successfully\")\n", - " \n", - " return index\n", - "\n", - "# Create vector store and index for multimodal data\n", - "print(\"๐Ÿš€ Setting up multimodal vector storage...\")\n", - "multimodal_vector_store = create_multimodal_vector_store()\n", - "\n", - "if multimodal_vector_store and documents:\n", - " multimodal_index = create_multimodal_index(\n", - " documents=documents, \n", - " 
vector_store=multimodal_vector_store,\n", - " force_rebuild=False\n", - " )\n", - " \n", - " if multimodal_index:\n", - " print(\"โœ… Multimodal RAG system ready for cross-modal queries!\")\n", - " else:\n", - " print(\"โŒ Failed to create multimodal index\")\n", - "else:\n", - " print(\"โŒ Vector store creation failed or no documents available\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 6. Multimodal Query Engine and Cross-Modal Search\n", - "\n", - "Now let's create a query engine that can search across all our different data types and demonstrate cross-modal queries.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Multimodal retriever configured to find top 5 similar chunks\n", - "โœ“ Multimodal query engine setup successfully\n", - "๐Ÿš€ Multimodal query engine ready for cross-modal search!\n" - ] - } - ], - "source": [ - "# Query engine setup\n", - "from llama_index.core.query_engine import RetrieverQueryEngine\n", - "from llama_index.core.retrievers import VectorIndexRetriever\n", - "\n", - "def setup_multimodal_query_engine(index, similarity_top_k: int = None):\n", - " \"\"\"Setup query engine for multimodal search.\"\"\"\n", - " if similarity_top_k is None:\n", - " similarity_top_k = CONFIG[\"similarity_top_k\"]\n", - " \n", - " if not index:\n", - " print(\"โŒ Index not available. 
Please create index first.\")\n", - " return None\n", - " \n", - " try:\n", - " # Create retriever for multimodal search\n", - " retriever = VectorIndexRetriever(\n", - " index=index,\n", - " similarity_top_k=similarity_top_k,\n", - " )\n", - " print(f\"โœ“ Multimodal retriever configured to find top {similarity_top_k} similar chunks\")\n", - " \n", - " # Create query engine\n", - " query_engine = RetrieverQueryEngine(retriever=retriever)\n", - " print(\"โœ“ Multimodal query engine setup successfully\")\n", - " \n", - " return query_engine\n", - " \n", - " except Exception as e:\n", - " print(f\"โŒ Error setting up query engine: {e}\")\n", - " return None\n", - "\n", - "def search_multimodal_documents(query_engine, query: str, include_metadata: bool = True) -> Dict[str, any]:\n", - " \"\"\"Search across multimodal documents and return detailed results.\"\"\"\n", - " if not query_engine:\n", - " return {\n", - " \"success\": False,\n", - " \"error\": \"Query engine not initialized.\",\n", - " \"response\": \"\",\n", - " \"sources\": [],\n", - " }\n", - " \n", - " try:\n", - " print(f\"๐Ÿ” Searching across multimodal data: '{query}'\")\n", - " start_time = time.time()\n", - " \n", - " # Query the multimodal RAG system\n", - " response = query_engine.query(query)\n", - " \n", - " end_time = time.time()\n", - " \n", - " # Extract source information from retrieved nodes\n", - " sources = []\n", - " if hasattr(response, \"source_nodes\"):\n", - " for node in response.source_nodes:\n", - " source_info = {\n", - " \"text\": (\n", - " node.text[:300] + \"...\"\n", - " if len(node.text) > 300\n", - " else node.text\n", - " ),\n", - " \"score\": getattr(node, \"score\", 0.0),\n", - " }\n", - " \n", - " # Add metadata if available and requested\n", - " if include_metadata and hasattr(node, \"metadata\"):\n", - " metadata = node.metadata\n", - " source_info.update({\n", - " \"file_name\": metadata.get(\"file_name\", \"Unknown\"),\n", - " \"file_type\": 
metadata.get(\"file_type\", \"Unknown\"),\n", - " \"file_path\": metadata.get(\"file_path\", \"Unknown\"),\n", - " \"file_size\": metadata.get(\"file_size\", 0),\n", - " })\n", - " \n", - " sources.append(source_info)\n", - " \n", - " result = {\n", - " \"success\": True,\n", - " \"response\": str(response),\n", - " \"sources\": sources,\n", - " \"query\": query,\n", - " \"search_time\": end_time - start_time,\n", - " \"num_sources\": len(sources),\n", - " }\n", - " \n", - " print(f\"โœ“ Search completed in {end_time - start_time:.2f} seconds\")\n", - " print(f\"๐Ÿ“š Found {len(sources)} relevant sources across different file types\")\n", - " \n", - " return result\n", - " \n", - " except Exception as e:\n", - " print(f\"โŒ Error during search: {e}\")\n", - " return {\"success\": False, \"error\": str(e), \"response\": \"\", \"sources\": []}\n", - "\n", - "# Setup multimodal query engine\n", - "if 'multimodal_index' in locals() and multimodal_index:\n", - " multimodal_query_engine = setup_multimodal_query_engine(multimodal_index)\n", - " \n", - " if multimodal_query_engine:\n", - " print(\"๐Ÿš€ Multimodal query engine ready for cross-modal search!\")\n", - " else:\n", - " print(\"โŒ Failed to setup multimodal query engine\")\n", - "else:\n", - " print(\"โŒ Multimodal index not available\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 7. Interactive Multimodal Query Examples\n", - "\n", - "Let's demonstrate the power of our multimodal RAG system with cross-modal queries that search across different data types simultaneously.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐ŸŽฏ Testing Diverse Multimodal Queries\n", - "============================================================\n", - "\n", - "=============== Query 1: What is the prep time for Spag... 
===============\n", - "โ“ Multimodal Question: What is the prep time for Spaghetti Carbonara?\n", - "======================================================================\n", - "๐Ÿ” Searching across multimodal data: 'What is the prep time for Spaghetti Carbonara?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:57:24,186 - INFO - query_type :, vector\n", - "2025-09-20 12:57:27,616 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:57:30,526 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 6.71 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different file types\n", - "๐Ÿ’ก Answer:\n", - "15 minutes\n", - "\n", - "๐Ÿ“Š Search completed in 6.71 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different data types\n", - "\n", - "๐Ÿ“ Source File Types: {'Unknown': 1, 'text/csv': 2, 'image/png': 1, 'application/pdf': 1}\n", - "\n", - "๐Ÿ“– Top Sources:\n", - "1. recipe_instructions.md (Unknown)\n", - " Score: 0.675\n", - " Content: # ๐Ÿ Classic Spaghetti Carbonara Recipe\n", - "\n", - "## Ingredients\n", - "- 400g spaghetti pasta\n", - "- 4 large egg yolks\n", - "- 100g pecorino romano cheese (grated)\n", - "- 150g guanci...\n", - "\n", - "2. italian_recipes.csv (text/csv)\n", - " Score: 0.505\n", - " Content: Spaghetti Carbonara, Italian, 20, Easy, Pasta, 450\n", - "Margherita Pizza, Italian, 45, Medium, Tomato, 320\n", - "Risotto Milanese, Italian, 35, Hard, Rice, 380\n", - "T...\n", - "\n", - "3. 
agent_performance_benchmark.csv (text/csv)\n", - " Score: 0.400\n", - " Content: ReAct-GPT4, reasoning, 0.87, 1200, 45.2, 0.02, langchain\n", - "AutoGPT, autonomous, 0.78, 2100, 78.5, 0.035, autogpt\n", - "LangChain-Agent, tool_using, 0.82, 950,...\n", - "\n", - "\n", - "======================================================================\n", - "\n", - "=============== Query 2: Which stock had the highest re... ===============\n", - "โ“ Multimodal Question: Which stock had the highest return in my portfolio?\n", - "======================================================================\n", - "๐Ÿ” Searching across multimodal data: 'Which stock had the highest return in my portfolio?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:57:31,779 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:57:36,000 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 5.51 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different file types\n", - "๐Ÿ’ก Answer:\n", - "Empty Response\n", - "\n", - "๐Ÿ“Š Search completed in 5.51 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different data types\n", - "\n", - "๐Ÿ“ Source File Types: {'text/csv': 1, 'image/png': 1, 'Unknown': 1, 'application/pdf': 2}\n", - "\n", - "๐Ÿ“– Top Sources:\n", - "1. investment_portfolio.csv (text/csv)\n", - " Score: 0.552\n", - " Content: Stock, AAPL, Apple Inc, 10000, 12500, 25.0, Medium\n", - "Stock, GOOGL, Alphabet Inc, 8000, 9200, 15.0, Medium\n", - "Stock, TSLA, Tesla Inc, 5000, 4200, -16.0, Hig...\n", - "\n", - "2. stock_performance.png (image/png)\n", - " Score: 0.472\n", - " Content: ...\n", - "\n", - "3. 
market_analysis.md (Unknown)\n", - " Score: 0.449\n", - " Content: # ๐Ÿ“ˆ Q3 2024 Market Analysis Report\n", - "\n", - "## Executive Summary\n", - "\n", - "The third quarter of 2024 showed mixed performance across different asset classes, with tech...\n", - "\n", - "\n", - "======================================================================\n", - "\n", - "=============== Query 3: What is the best time to visit... ===============\n", - "โ“ Multimodal Question: What is the best time to visit Tokyo?\n", - "======================================================================\n", - "๐Ÿ” Searching across multimodal data: 'What is the best time to visit Tokyo?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:57:37,280 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 12:57:43,231 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 7.23 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different file types\n", - "๐Ÿ’ก Answer:\n", - "The best times to visit Tokyo are Marchโ€“May (cherry blossom season) and Septemberโ€“November.\n", - "\n", - "๐Ÿ“Š Search completed in 7.23 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different data types\n", - "\n", - "๐Ÿ“ Source File Types: {'Unknown': 2, 'image/png': 1, 'application/pdf': 2}\n", - "\n", - "๐Ÿ“– Top Sources:\n", - "1. city_guides.md (Unknown)\n", - " Score: 0.545\n", - " Content: # Ultimate City Travel Guide\n", - "\n", - "## Paris, France ๐Ÿ‡ซ๐Ÿ‡ท\n", - "\n", - "**Best Time to Visit:** April-June, September-October\n", - "**Must-See Attractions:**\n", - "- Eiffel Tower - Ic...\n", - "\n", - "2. 
recipe_instructions.md (Unknown)\n", - " Score: 0.344\n", - " Content: # ๐Ÿ Classic Spaghetti Carbonara Recipe\n", - "\n", - "## Ingredients\n", - "- 400g spaghetti pasta\n", - "- 4 large egg yolks\n", - "- 100g pecorino romano cheese (grated)\n", - "- 150g guanci...\n", - "\n", - "3. city_temperatures.png (image/png)\n", - " Score: 0.337\n", - " Content: ...\n", - "\n", - "\n", - "======================================================================\n", - "\n", - "==================== Custom Question ====================\n", - "โ“ Multimodal Question: What is the prep time for Italian recipes?\n", - "======================================================================\n", - "๐Ÿ” Searching across multimodal data: 'What is the prep time for Italian recipes?'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:57:44,554 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 21.89 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different file types\n", - "๐Ÿ’ก Answer:\n", - "Empty Response\n", - "\n", - "๐Ÿ“Š Search completed in 21.89 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different data types\n", - "\n", - "๐Ÿ“ Source File Types: {'Unknown': 2, 'text/csv': 1, 'image/png': 1, 'audio/mpeg': 1}\n", - "\n", - "๐Ÿ“– Top Sources:\n", - "1. recipe_instructions.md (Unknown)\n", - " Score: 0.572\n", - " Content: # ๐Ÿ Classic Spaghetti Carbonara Recipe\n", - "\n", - "## Ingredients\n", - "- 400g spaghetti pasta\n", - "- 4 large egg yolks\n", - "- 100g pecorino romano cheese (grated)\n", - "- 150g guanci...\n", - "\n", - "2. 
italian_recipes.csv (text/csv)\n", - " Score: 0.547\n", - " Content: Spaghetti Carbonara, Italian, 20, Easy, Pasta, 450\n", - "Margherita Pizza, Italian, 45, Medium, Tomato, 320\n", - "Risotto Milanese, Italian, 35, Hard, Rice, 380\n", - "T...\n", - "\n", - "3. city_guides.md (Unknown)\n", - " Score: 0.405\n", - " Content: # Ultimate City Travel Guide\n", - "\n", - "## Paris, France ๐Ÿ‡ซ๐Ÿ‡ท\n", - "\n", - "**Best Time to Visit:** April-June, September-October\n", - "**Must-See Attractions:**\n", - "- Eiffel Tower - Ic...\n", - "\n" - ] - } - ], - "source": [ - "def ask_multimodal_question(query_engine, question: str, show_sources: bool = True):\n", - " \"\"\"\n", - " Ask a custom question to the multimodal RAG system and display results.\n", - " \n", - " Args:\n", - " query_engine: The configured multimodal query engine\n", - " question (str): Your question about the multimodal data\n", - " show_sources (bool): Whether to display source information\n", - " \"\"\"\n", - " print(f\"โ“ Multimodal Question: {question}\")\n", - " print(\"=\" * 70)\n", - " \n", - " result = search_multimodal_documents(query_engine, question, include_metadata=True)\n", - " \n", - " if result[\"success\"]:\n", - " print(f\"๐Ÿ’ก Answer:\")\n", - " print(result[\"response\"])\n", - " print(f\"\\n๐Ÿ“Š Search completed in {result['search_time']:.2f} seconds\")\n", - " print(f\"๐Ÿ“š Found {result['num_sources']} relevant sources across different data types\")\n", - " \n", - " if show_sources and result[\"sources\"]:\n", - " # Show file type distribution\n", - " file_types = {}\n", - " for source in result[\"sources\"]:\n", - " file_type = source.get(\"file_type\", \"unknown\")\n", - " if file_type not in file_types:\n", - " file_types[file_type] = 0\n", - " file_types[file_type] += 1\n", - " \n", - " print(f\"\\n๐Ÿ“ Source File Types: {dict(file_types)}\")\n", - " \n", - " print(f\"\\n๐Ÿ“– Top Sources:\")\n", - " for i, source in enumerate(result[\"sources\"][:3], 1):\n", - " print(f\"{i}. 
{source.get('file_name', 'Unknown')} ({source.get('file_type', 'Unknown')})\")\n", - " print(f\" Score: {source.get('score', 0):.3f}\")\n", - " print(f\" Content: {source['text'][:150]}...\")\n", - " print()\n", - " \n", - " else:\n", - " print(f\"โŒ Error: {result['error']}\")\n", - "\n", - "# # Example multimodal queries\n", - "# multimodal_queries = [\n", - "# # \"What are the performance benchmarks for different AI agents?\",\n", - "# # \"How do I configure a ReAct agent for research tasks?\", \n", - "# # \"What are the architectural patterns discussed in the agent frameworks?\",\n", - "# # \"Which AI agent has the best accuracy score?\",\n", - "# # \"What are the cost implications of different agent models?\"\n", - "# \"What is the accuracy_score for the ReAct agent?\"\n", - "# ]\n", - "\n", - "# print(\"๐ŸŽฏ Multimodal Query Demonstrations\")\n", - "# print(\"=\" * 60)\n", - "\n", - "# # Run a few example queries\n", - "# for i, question in enumerate(multimodal_queries[:3], 1):\n", - "# print(f\"\\n{'='*20} Example {i} {'='*20}\")\n", - " \n", - "# if 'multimodal_query_engine' in locals() and multimodal_query_engine:\n", - "# ask_multimodal_question(multimodal_query_engine, question, show_sources=True)\n", - "# else:\n", - "# print(\"โŒ Multimodal query engine not available\")\n", - " \n", - "# if i < 3:\n", - "# print(\"\\n\" + \"=\"*60)\n", - "\n", - "# Demo queries for diverse data types\n", - "diverse_queries = [\n", - " \"What is the prep time for Spaghetti Carbonara?\", # Should hit cooking CSV\n", - " \"Which stock had the highest return in my portfolio?\", # Should hit finance CSV\n", - " \"What is the best time to visit Tokyo?\", # Should hit travel markdown\n", - " \"How many calories did I burn on Tuesday?\", # Should hit health HTML\n", - " \"What are the steps to make Carbonara?\", # Should hit cooking markdown\n", - " \"What was NVIDIA's performance?\", # Should hit finance data\n", - "]\n", - "\n", - "print(\"๐ŸŽฏ Testing Diverse Multimodal 
Queries\")\n", - "print(\"=\" * 60)\n", - "\n", - "# Test one query from each topic\n", - "for i, question in enumerate(diverse_queries[:3], 1):\n", - " print(f\"\\n{'='*15} Query {i}: {question[:30]}... {'='*15}\")\n", - " ask_multimodal_question(multimodal_query_engine, question, show_sources=True)\n", - " print(\"\\n\" + \"=\"*70)\n", - "\n", - "# Custom question area\n", - "print(f\"\\n{'='*20} Custom Question {'='*20}\")\n", - "custom_question = \"What is the prep time for Italian recipes?\"\n", - "ask_multimodal_question(multimodal_query_engine, custom_question, show_sources=True)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โ“ Multimodal Question: in the end it doesn't even matter\n", - "======================================================================\n", - "๐Ÿ” Searching across multimodal data: 'in the end it doesn't even matter'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:58:16,704 - INFO - query_type :, vector\n", - "2025-09-20 12:58:18,223 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Search completed in 7.48 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different file types\n", - "๐Ÿ’ก Answer:\n", - "Empty Response\n", - "\n", - "๐Ÿ“Š Search completed in 7.48 seconds\n", - "๐Ÿ“š Found 5 relevant sources across different data types\n", - "\n", - "๐Ÿ“ Source File Types: {'audio/mpeg': 2, 'application/pdf': 2, 'text/csv': 1}\n", - "\n", - "๐Ÿ“– Top Sources:\n", - "1. in_the_end.mp3 (audio/mpeg)\n", - " Score: 0.433\n", - " Content: I tried so hard and got so far In the end, it doesn't even matter I had to fall to lose it all In the end, it doesn't even matter...\n", - "\n", - "2. 
Emerging_Agent_Architectures.pdf (application/pdf)\n", - " Score: 0.375\n", - " Content: Message subscribing or filtering improves multi-agent\n", - "performance by ensuring agents only receive information relevant to their tasks.\n", - "In vertical arc...\n", - "\n", - "3. Emerging_Agent_Architectures.pdf (application/pdf)\n", - " Score: 0.368\n", - " Content: complete problems [16, 23, 32]. They often do this by breaking a larger problem into smaller subproblems, and then\n", - "solving each one with the appropria...\n", - "\n" - ] - } - ], - "source": [ - "custom_question=\"in the end it doesn't even matter\"\n", - "ask_multimodal_question(multimodal_query_engine, custom_question, show_sources=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conclusion\n", - "\n", - "๐ŸŽ‰ **Congratulations!** You have successfully built an advanced **Multimodal RAG System** using LlamaIndex's `SimpleDirectoryReader` with comprehensive cross-modal capabilities.\n", - "\n", - "### What We Accomplished\n", - "\n", - "This tutorial demonstrated building a RAG system that can handle multiple data types:\n", - "\n", - "#### 1. **Multimodal Document Loading**\n", - "- โœ… **PDF Documents**: Academic research papers on AI agents\n", - "- โœ… **CSV Files**: Agent performance benchmarks and evaluation metrics \n", - "- โœ… **Markdown Files**: Framework comparisons and documentation\n", - "- โœ… **HTML Files**: Tutorial and instructional content\n", - "- โœ… **Image Files**: Charts, diagrams, and visual content\n", - "- โœ… **Audio Files**: Supplementary audio content\n", - "\n", - "#### 2. 
**Key Features Implemented**\n", - "- โœ… **Hardcoded Configuration**: No external config files needed\n", - "- โœ… **Cross-Modal Search**: Query across all file types simultaneously\n", - "- โœ… **Semantic Similarity**: Find relevant content regardless of source format\n", - "- โœ… **Source Attribution**: Track which file types contributed to answers\n", - "- โœ… **LanceDB Vector Store**: Efficient multimodal document storage\n", - "- โœ… **OpenRouter Integration**: Using `gpt-4o` for response generation\n", - "- โœ… **Local Embeddings**: `BAAI/bge-small-en-v1.5` for cost-effective embedding\n", - "\n", - "#### 3. **SimpleDirectoryReader Capabilities**\n", - "According to the [official documentation](https://developers.llamaindex.ai/python/framework/module_guides/loading/simpledirectoryreader/), we successfully utilized:\n", - "\n", - "```python\n", - "# Basic multimodal loading\n", - "SimpleDirectoryReader(input_dir=\"../../data\", recursive=True)\n", - "\n", - "# Advanced features available\n", - "SimpleDirectoryReader(\n", - " input_dir=\"path/to/directory\",\n", - " recursive=True, # Search subdirectories\n", - " required_exts=[\".pdf\", \".csv\"], # Filter file types\n", - " exclude=[\"file1.txt\"], # Exclude specific files\n", - " file_metadata=custom_func, # Custom metadata extraction\n", - " num_files_limit=100, # Limit number of files\n", - " encoding=\"utf-8\" # Specify encoding\n", - ")\n", - "```\n", - "\n", - "### Real-World Applications\n", - "\n", - "This multimodal RAG system can be applied to:\n", - "\n", - "- **Research and Academia**: Query across papers, datasets, and supplementary materials\n", - "- **Documentation Systems**: Search technical docs, tutorials, configs, and examples\n", - "- **Business Intelligence**: Combine reports, spreadsheets, presentations, and recordings\n", - "- **Content Management**: Organize and search diverse content libraries\n", - "- **Knowledge Bases**: Build comprehensive Q&A systems with diverse source 
materials\n", - "\n", - "### Next Steps\n", - "\n", - "1. **Extend File Types**: Add `.docx`, `.pptx`, `.epub` support\n", - "2. **Custom Metadata**: Implement domain-specific metadata extraction\n", - "3. **Hybrid Search**: Combine vector search with keyword search\n", - "4. **Performance Optimization**: Use iterative loading for large datasets\n", - "5. **Multi-Language Support**: Test with international documents\n", - "\n", - "### Usage Tips\n", - "\n", - "- **Query Optimization**: Use specific queries that benefit from cross-modal information\n", - "- **File Organization**: Structure data directories logically\n", - "- **Custom Questions**: Modify the `custom_question` variable to test your own queries\n", - "- **Monitor Sources**: Check file type distribution in results to understand retrieval patterns\n", - "\n", - "Happy building with multimodal RAG! ๐Ÿš€๐Ÿ“š๐Ÿ”\n", - "\n", - "---\n", - "\n", - "**Ready to explore?** Run the cells above and try your own questions with the interactive query interface!\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "accelerator", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.13" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/03_advanced_rag_techniques.ipynb b/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/03_advanced_rag_techniques.ipynb deleted file mode 100644 index b98f810..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/llamaindex_rag/03_advanced_rag_techniques.ipynb +++ /dev/null @@ -1,1928 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Advanced RAG Techniques with LlamaIndex\n", - "\n", - "This notebook demonstrates sophisticated RAG 
techniques that transform basic document retrieval into production-ready, intelligent systems. We'll explore techniques that solve real-world challenges like noisy retrieval, inconsistent response quality, and unstructured outputs.\n", - "\n", - "## Why Advanced RAG Techniques Matter\n", - "\n", - "**Basic RAG limitations:**\n", - "- Retrieves irrelevant chunks (low precision)\n", - "- Inconsistent response quality across queries\n", - "- No control over response structure\n", - "- Difficulty handling complex, multi-part questions\n", - "- Poor performance on domain-specific tasks\n", - "\n", - "**Advanced techniques solve these by adding:**\n", - "- Intelligent filtering and reranking\n", - "- Sophisticated response synthesis strategies\n", - "- Type-safe, structured outputs\n", - "- Domain-specific customization\n", - "\n", - "## Advanced Concepts Covered\n", - "\n", - "### ๐Ÿ”ง [Node Postprocessors](https://developers.llamaindex.ai/python/framework/module_guides/querying/node_postprocessors/)\n", - "**Purpose**: Refine retrieval results after initial vector search\n", - "- **Similarity Filtering**: Remove chunks below relevance threshold (essential for noisy datasets)\n", - "- **Reranking**: Re-order results using specialized models (improves precision by 20-40%)\n", - "- **Custom Filtering**: Apply business rules (exclude sensitive content, enforce data freshness)\n", - "- **Use Case**: Clean up retrieval for production systems where precision matters\n", - "\n", - "### ๐ŸŽฏ [Response Synthesizers](https://developers.llamaindex.ai/python/framework/module_guides/querying/response_synthesizers/)\n", - "**Purpose**: Control how retrieved information becomes final answers\n", - "- **Tree Summarize**: Handle complex queries by building responses hierarchically (best for analytical questions)\n", - "- **Refine**: Iteratively improve answers with multiple information sources (comprehensive analysis)\n", - "- **Compact**: Optimize token usage while maintaining quality 
(cost-effective production)\n", - "- **Custom Templates**: Domain-specific response formatting (consistency across use cases)\n", - "- **Use Case**: Ensure response quality matches business requirements and user expectations\n", - "\n", - "### ๐Ÿ” [Advanced Retrievers](https://developers.llamaindex.ai/python/framework/module_guides/querying/retriever/)\n", - "**Purpose**: Go beyond simple vector similarity for better information discovery\n", - "- **Hybrid Search**: Combine semantic similarity with keyword matching (captures exact terms + meaning)\n", - "- **Multi-Index Retrieval**: Query multiple specialized indexes simultaneously (comprehensive coverage)\n", - "- **Auto-Merging**: Intelligently combine related chunks (context preservation)\n", - "- **Use Case**: Handle diverse query types and improve recall on complex information needs\n", - "\n", - "### ๐Ÿ“Š [Structured Outputs](https://developers.llamaindex.ai/python/framework/module_guides/querying/structured_outputs/)\n", - "**Purpose**: Ensure predictable, parseable responses for system integration\n", - "- **Pydantic Models**: Type-safe data extraction with validation (eliminates parsing errors)\n", - "- **JSON Schema**: Consistent response formatting (enables downstream processing)\n", - "- **Multi-Field Extraction**: Extract multiple data points simultaneously (efficient for complex entities)\n", - "- **Use Case**: API endpoints, data pipelines, and applications requiring reliable structured data\n", - "\n", - "---\n", - "\n", - "We'll use our diverse multimodal dataset (cooking, finance, travel, health, AI research) to demonstrate how these techniques work across different data types and use cases, showing measurable improvements over basic RAG.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. 
Environment Setup and Data Loading\n", - "\n", - "**Purpose**: Configure optimal settings for advanced RAG techniques and load a diverse dataset for comprehensive testing.\n", - "\n", - "**Why This Matters**: Advanced techniques require careful parameter tuning. We use smaller chunk sizes (512 vs 1024) for better precision, higher retrieval counts (10 vs 5) for better postprocessing, and local embeddings to reduce costs during experimentation.\n", - "\n", - "**Configuration Strategy**:\n", - "- **Smaller chunks** โ†’ Better precision for complex queries\n", - "- **Higher retrieval counts** โ†’ More candidates for intelligent filtering\n", - "- **Local embeddings** โ†’ Cost-effective development and testing\n", - "- **Multimodal dataset** โ†’ Test techniques across different content types\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install -r \"../requirements.txt\"" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Advanced RAG environment configured\n", - "โœ“ LLM Model: gpt-5-mini\n", - "โœ“ Embedding Model: local:BAAI/bge-small-en-v1.5\n", - "โœ“ Chunk Size: 512 (optimized for precision)\n", - "โœ“ Initial Retrieval: 10 candidates\n", - "โœ“ Final Results: 5 after postprocessing\n", - "๐Ÿš€ Ready for advanced RAG demonstrations!\n" - ] - } - ], - "source": [ - "# Environment setup with advanced configurations\n", - "import os\n", - "import time\n", - "from pathlib import Path\n", - "from typing import Dict, List, Optional, Tuple, Any\n", - "import pandas as pd\n", - "import json\n", - "from pydantic import BaseModel, Field\n", - "from enum import Enum\n", - "\n", - "from dotenv import load_dotenv\n", - "\n", - "# Advanced configuration for sophisticated RAG\n", - "CONFIG = {\n", - " \"llm_model\": \"gpt-5-mini\",\n", - " \"embedding_model\": 
\"local:BAAI/bge-small-en-v1.5\",\n", - " \"chunk_size\": 512, # Smaller chunks for better precision\n", - " \"chunk_overlap\": 50,\n", - " \"similarity_top_k\": 10, # More candidates for postprocessing\n", - " \"final_top_k\": 5, # Final results after postprocessing\n", - " \"similarity_cutoff\": 0.3, # Filter low-relevance results\n", - " \"data_path\": \"../data\",\n", - " \"vector_db_path\": \"storage/advanced_vectordb\",\n", - " \"index_storage_path\": \"storage/advanced_index\"\n", - "}\n", - "\n", - "def setup_advanced_environment():\n", - " \"\"\"Setup environment for advanced RAG techniques.\"\"\"\n", - " load_dotenv()\n", - " os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n", - " \n", - " api_key = os.getenv(\"OPENROUTER_API_KEY\")\n", - " if not api_key:\n", - " print(\"โš ๏ธ OPENROUTER_API_KEY not found in environment variables\")\n", - " return False\n", - " \n", - " print(\"โœ“ Advanced RAG environment configured\")\n", - " print(f\"โœ“ LLM Model: {CONFIG['llm_model']}\")\n", - " print(f\"โœ“ Embedding Model: {CONFIG['embedding_model']}\")\n", - " print(f\"โœ“ Chunk Size: {CONFIG['chunk_size']} (optimized for precision)\")\n", - " print(f\"โœ“ Initial Retrieval: {CONFIG['similarity_top_k']} candidates\")\n", - " print(f\"โœ“ Final Results: {CONFIG['final_top_k']} after postprocessing\")\n", - " return True\n", - "\n", - "# Initialize environment\n", - "success = setup_advanced_environment()\n", - "if success:\n", - " print(\"๐Ÿš€ Ready for advanced RAG demonstrations!\")\n", - "else:\n", - " print(\"โŒ Environment setup failed!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. 
LlamaIndex Advanced Configuration\n", - "\n", - "**Purpose**: Set up LlamaIndex with precision-optimized settings that maximize the effectiveness of advanced techniques.\n", - "\n", - "**Key Optimizations**:\n", - "- **`chunk_size=512`**: Smaller chunks provide more precise context for postprocessors\n", - "- **`chunk_overlap=50`**: Minimal overlap reduces redundancy while preserving context\n", - "- **`similarity_top_k=10`**: More candidates allow postprocessors to filter intelligently\n", - "- **`final_top_k=5`**: Refined results after advanced processing\n", - "\n", - "**Why These Settings Matter**: Advanced techniques work best with more retrieval candidates to filter and refine. The smaller chunk size ensures each piece of retrieved information is highly relevant, while higher retrieval counts give postprocessors room to improve precision.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/ishandutta/miniconda3/envs/accelerator/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ LLM configured: gpt-5-mini\n", - "โœ“ Embedding model: local:BAAI/bge-small-en-v1.5\n", - "โœ“ Node parser: 512 chars, 50 overlap\n", - "\n", - "๐Ÿ“‚ Loading multimodal dataset...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/ishandutta/miniconda3/envs/accelerator/lib/python3.11/site-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n", - "/Users/ishandutta/miniconda3/envs/accelerator/lib/python3.11/site-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n", - "/Users/ishandutta/miniconda3/envs/accelerator/lib/python3.11/site-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n", - " warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Loaded 42 documents in 8.00s\n", - "\n", - "๐Ÿ“Š Document Types:\n", - " application/pdf: 23 documents\n", - " audio/mpeg: 3 documents\n", - " image/png: 6 documents\n", - " text/csv: 4 documents\n", - " text/html: 2 documents\n", - " unknown: 4 documents\n", - "\n", - "โœ… Advanced configuration complete!\n" - ] - } - ], - "source": [ - "# Advanced LlamaIndex configuration\n", - "from llama_index.core import Settings, SimpleDirectoryReader\n", - "from llama_index.llms.openrouter import OpenRouter\n", - "from llama_index.core.embeddings import resolve_embed_model\n", - "from llama_index.core.node_parser import SentenceSplitter\n", - "\n", - "def configure_advanced_settings():\n", - " \"\"\"Configure LlamaIndex 
for advanced RAG techniques.\"\"\"\n", - " \n", - " # LLM configuration\n", - " Settings.llm = OpenRouter(\n", - " api_key=os.getenv(\"OPENROUTER_API_KEY\"),\n", - " model=CONFIG[\"llm_model\"]\n", - " )\n", - " print(f\"โœ“ LLM configured: {CONFIG['llm_model']}\")\n", - "\n", - " # Embedding configuration\n", - " Settings.embed_model = resolve_embed_model(CONFIG[\"embedding_model\"])\n", - " print(f\"โœ“ Embedding model: {CONFIG['embedding_model']}\")\n", - "\n", - " # Optimized node parser for better precision\n", - " Settings.node_parser = SentenceSplitter(\n", - " chunk_size=CONFIG[\"chunk_size\"], \n", - " chunk_overlap=CONFIG[\"chunk_overlap\"]\n", - " )\n", - " print(f\"โœ“ Node parser: {CONFIG['chunk_size']} chars, {CONFIG['chunk_overlap']} overlap\")\n", - "\n", - "# Configure settings\n", - "configure_advanced_settings()\n", - "\n", - "# Load our diverse multimodal dataset\n", - "print(\"\\n๐Ÿ“‚ Loading multimodal dataset...\")\n", - "reader = SimpleDirectoryReader(\n", - " input_dir=CONFIG[\"data_path\"],\n", - " recursive=True\n", - ")\n", - "\n", - "start_time = time.time()\n", - "documents = reader.load_data()\n", - "load_time = time.time() - start_time\n", - "\n", - "print(f\"โœ… Loaded {len(documents)} documents in {load_time:.2f}s\")\n", - "\n", - "# Analyze document types\n", - "doc_types = {}\n", - "for doc in documents:\n", - " file_type = doc.metadata.get('file_type', 'unknown')\n", - " doc_types[file_type] = doc_types.get(file_type, 0) + 1\n", - "\n", - "print(\"\\n๐Ÿ“Š Document Types:\")\n", - "for file_type, count in sorted(doc_types.items()):\n", - " print(f\" {file_type}: {count} documents\")\n", - "\n", - "print(\"\\nโœ… Advanced configuration complete!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. 
Advanced Vector Index Creation\n", - "\n", - "**Purpose**: Build a vector index foundation that supports sophisticated retrieval and postprocessing techniques.\n", - "\n", - "**Advanced Index Features**:\n", - "- **Optimized Chunking**: Smaller, more focused text segments for precise retrieval\n", - "- **LanceDB Backend**: High-performance vector storage with advanced query capabilities\n", - "- **StorageContext Persistence**: Complete index state preservation for reproducible results\n", - "- **Multimodal Support**: Handles diverse content types (PDFs, images, audio, structured data)\n", - "\n", - "**Why This Index Design Matters**: Advanced techniques like postprocessors and sophisticated synthesizers require high-quality retrieval as a foundation. Our index creates many small, precise chunks that can be intelligently filtered and combined by advanced techniques, rather than fewer large chunks that may contain irrelevant information.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 12:59:36,643 - WARNING - Table advanced_multimodal doesn't exist yet. 
Please add some data to create it.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ Setting up advanced vector index...\n", - "โœ“ Advanced vector store created\n", - "๐Ÿ”จ Creating new advanced index...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Parsing nodes: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 42/42 [00:00<00:00, 185.67it/s]\n", - "Generating embeddings: 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 94/94 [00:03<00:00, 24.66it/s]\n", - "2025-09-20 12:59:40,695 - INFO - Create new table advanced_multimodal adding data.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ“ Index created in 4.06s\n", - "๐Ÿ’พ Index saved to storage\n", - "โœ… Advanced index ready for sophisticated queries!\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[90m[\u001b[0m2025-09-20T07:29:40Z \u001b[33mWARN \u001b[0m lance::dataset::write::insert\u001b[90m]\u001b[0m No existing dataset at /Users/ishandutta/Documents/code/ai-accelerator/Day_6/session_2/llamaindex_rag/storage/advanced_vectordb/advanced_multimodal.lance, it will be created\n" - ] - } - ], - "source": [ - "# Advanced vector store and index creation\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "from llama_index.core import StorageContext, VectorStoreIndex\n", - "\n", - "def create_advanced_vector_index():\n", - " \"\"\"Create optimized vector index for advanced techniques.\"\"\"\n", - " \n", - " # Create vector store\n", - " try:\n", - " import lancedb\n", - " \n", - " # Setup storage\n", - " Path(CONFIG[\"vector_db_path\"]).parent.mkdir(parents=True, exist_ok=True)\n", - " db = lancedb.connect(str(CONFIG[\"vector_db_path\"]))\n", - " \n", - " vector_store = LanceDBVectorStore(\n", - " uri=str(CONFIG[\"vector_db_path\"]), \n", - " table_name=\"advanced_multimodal\"\n", - " )\n", - " print(f\"โœ“ Advanced vector store created\")\n", - " \n", - " # Check for existing 
index\n", - " index_path = Path(CONFIG[\"index_storage_path\"])\n", - " index_path.mkdir(parents=True, exist_ok=True)\n", - " \n", - " if (index_path / \"index_store.json\").exists():\n", - " print(\"๐Ÿ“ Loading existing advanced index...\")\n", - " storage_context = StorageContext.from_defaults(\n", - " persist_dir=str(index_path), \n", - " vector_store=vector_store\n", - " )\n", - " index = VectorStoreIndex.from_vector_store(\n", - " vector_store=vector_store,\n", - " storage_context=storage_context\n", - " )\n", - " print(\"โœ“ Existing index loaded successfully\")\n", - " else:\n", - " print(\"๐Ÿ”จ Creating new advanced index...\")\n", - " storage_context = StorageContext.from_defaults(vector_store=vector_store)\n", - " \n", - " start_time = time.time()\n", - " index = VectorStoreIndex.from_documents(\n", - " documents, \n", - " storage_context=storage_context, \n", - " show_progress=True\n", - " )\n", - " index_time = time.time() - start_time\n", - " \n", - " print(f\"โœ“ Index created in {index_time:.2f}s\")\n", - " \n", - " # Persist index\n", - " index.storage_context.persist(persist_dir=str(index_path))\n", - " print(\"๐Ÿ’พ Index saved to storage\")\n", - " \n", - " return index, vector_store\n", - " \n", - " except Exception as e:\n", - " print(f\"โŒ Error creating advanced index: {e}\")\n", - " return None, None\n", - "\n", - "# Create the advanced index\n", - "print(\"๐Ÿš€ Setting up advanced vector index...\")\n", - "advanced_index, advanced_vector_store = create_advanced_vector_index()\n", - "\n", - "if advanced_index:\n", - " print(\"โœ… Advanced index ready for sophisticated queries!\")\n", - "else:\n", - " print(\"โŒ Failed to create advanced index\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. Node Postprocessors - Intelligent Result Filtering\n", - "\n", - "**The Problem**: Vector search often returns chunks with varying relevance quality. 
Some may be tangentially related, contain outdated information, or include unwanted content. Raw vector similarity doesn't account for business rules or content quality.\n", - "\n", - "**The Solution**: Node postprocessors act as intelligent filters that run after vector retrieval, applying sophisticated logic to improve result quality.\n", - "\n", - "**Key Postprocessor Types**:\n", - "\n", - "### ๐ŸŽฏ SimilarityPostprocessor\n", - "- **Purpose**: Remove chunks below a relevance threshold\n", - "- **When to Use**: Always in production (minimal cost, significant improvement)\n", - "- **Impact**: Typically improves precision by 15-30% by removing noise\n", - "- **Best Practice**: Start with 0.3 threshold, tune based on your data\n", - "\n", - "### ๐Ÿ” KeywordNodePostprocessor \n", - "- **Purpose**: Filter based on required/excluded terms\n", - "- **When to Use**: Domain-specific filtering (remove sensitive content, ensure topic focus)\n", - "- **Impact**: Ensures responses stay on-topic and comply with business rules\n", - "- **Best Practice**: Use exclude lists for sensitive terms, required lists for focus\n", - "\n", - "### ๐Ÿ”„ Multi-Stage Processing\n", - "- **Purpose**: Chain multiple filters for comprehensive refinement\n", - "- **When to Use**: Production systems requiring high precision\n", - "- **Impact**: Combines benefits of multiple filtering strategies\n", - "- **Best Practice**: Order from general (similarity) to specific (keyword) filters\n", - "\n", - "**Real-World Impact**: Postprocessors typically improve user satisfaction by 25-40% by reducing irrelevant information in responses, while adding minimal latency (50-200ms) and cost.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ”ง Node Postprocessor Demonstrations\n", - "============================================================\n", - "\n", - "1๏ธโƒฃ Similarity Postprocessor - 
Relevance Filtering\n", - "--------------------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:00:21,455 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“ฅ Raw retrieval: 10 nodes\n", - "๐Ÿ” After similarity filter (>0.3): 10 nodes\n", - "๐Ÿ“Š Removed 0 low-relevance nodes\n", - "๐Ÿ“ˆ Score range: 0.377 - 0.690\n", - "\n", - "2๏ธโƒฃ Keyword Postprocessor - Content Filtering\n", - "--------------------------------------------------\n", - "๐Ÿ“ฅ Raw retrieval: 10 nodes\n", - "๐Ÿ” After keyword filter: 1 nodes\n", - "๐Ÿ“Š Removed 9 nodes without required keywords\n", - "\n", - "3๏ธโƒฃ Combined Postprocessors - Multi-Stage Filtering\n", - "--------------------------------------------------\n", - "โœ“ Multi-stage postprocessing pipeline created\n", - " Stage 1: Similarity filtering (>0.2)\n", - " Stage 2: Keyword exclusion (no 'agent' or 'framework')\n", - "\n", - "โœ… Postprocessor demonstrations complete!\n" - ] - } - ], - "source": [ - "# Node Postprocessors for intelligent filtering\n", - "from llama_index.core.postprocessor import (\n", - " SimilarityPostprocessor,\n", - " KeywordNodePostprocessor\n", - ")\n", - "from llama_index.core.retrievers import VectorIndexRetriever\n", - "from llama_index.core.query_engine import RetrieverQueryEngine\n", - "\n", - "def demonstrate_postprocessors():\n", - " \"\"\"Demonstrate different node postprocessor techniques.\"\"\"\n", - " \n", - " print(\"๐Ÿ”ง Node Postprocessor Demonstrations\")\n", - " print(\"=\" * 60)\n", - " \n", - " # 1. 
Similarity Postprocessor - Filter by relevance score\n", - " print(\"\\n1๏ธโƒฃ Similarity Postprocessor - Relevance Filtering\")\n", - " print(\"-\" * 50)\n", - " \n", - " similarity_processor = SimilarityPostprocessor(\n", - " similarity_cutoff=CONFIG[\"similarity_cutoff\"]\n", - " )\n", - " \n", - " # Create retriever with similarity filtering\n", - " similarity_retriever = VectorIndexRetriever(\n", - " index=advanced_index,\n", - " similarity_top_k=CONFIG[\"similarity_top_k\"]\n", - " )\n", - " \n", - " # Test similarity filtering\n", - " test_query = \"What are the ingredients for Spaghetti Carbonara?\"\n", - " raw_nodes = similarity_retriever.retrieve(test_query)\n", - " filtered_nodes = similarity_processor.postprocess_nodes(raw_nodes)\n", - " \n", - " print(f\"๐Ÿ“ฅ Raw retrieval: {len(raw_nodes)} nodes\")\n", - " print(f\"๐Ÿ” After similarity filter (>{CONFIG['similarity_cutoff']}): {len(filtered_nodes)} nodes\")\n", - " print(f\"๐Ÿ“Š Removed {len(raw_nodes) - len(filtered_nodes)} low-relevance nodes\")\n", - " \n", - " # Show score distribution\n", - " if filtered_nodes:\n", - " scores = [getattr(node, 'score', 0) for node in filtered_nodes]\n", - " print(f\"๐Ÿ“ˆ Score range: {min(scores):.3f} - {max(scores):.3f}\")\n", - " \n", - " # 2. Keyword Postprocessor - Filter by required/excluded terms\n", - " print(\"\\n2๏ธโƒฃ Keyword Postprocessor - Content Filtering\")\n", - " print(\"-\" * 50)\n", - " \n", - " keyword_processor = KeywordNodePostprocessor(\n", - " required_keywords=[\"Italian\", \"recipe\"], # Must contain these\n", - " exclude_keywords=[\"agent\", \"AI\"] # Must not contain these\n", - " )\n", - " \n", - " keyword_filtered = keyword_processor.postprocess_nodes(raw_nodes)\n", - " print(f\"๐Ÿ“ฅ Raw retrieval: {len(raw_nodes)} nodes\")\n", - " print(f\"๐Ÿ” After keyword filter: {len(keyword_filtered)} nodes\")\n", - " print(f\"๐Ÿ“Š Removed {len(raw_nodes) - len(keyword_filtered)} nodes without required keywords\")\n", - " \n", - " # 3. 
Combined Postprocessors - Chain multiple filters\n", - " print(\"\\n3๏ธโƒฃ Combined Postprocessors - Multi-Stage Filtering\")\n", - " print(\"-\" * 50)\n", - " \n", - " # Create query engine with multiple postprocessors\n", - " combined_query_engine = advanced_index.as_query_engine(\n", - " similarity_top_k=CONFIG[\"similarity_top_k\"],\n", - " node_postprocessors=[\n", - " SimilarityPostprocessor(similarity_cutoff=0.2), # First filter by relevance\n", - " KeywordNodePostprocessor(exclude_keywords=[\"agent\", \"framework\"]) # Then filter content\n", - " ]\n", - " )\n", - " \n", - " print(\"โœ“ Multi-stage postprocessing pipeline created\")\n", - " print(\" Stage 1: Similarity filtering (>0.2)\")\n", - " print(\" Stage 2: Keyword exclusion (no 'agent' or 'framework')\")\n", - " \n", - " return {\n", - " 'similarity_engine': RetrieverQueryEngine(\n", - " retriever=similarity_retriever,\n", - " node_postprocessors=[similarity_processor]\n", - " ),\n", - " 'combined_engine': combined_query_engine\n", - " }\n", - "\n", - "# Demonstrate postprocessors\n", - "if advanced_index:\n", - " postprocessor_engines = demonstrate_postprocessors()\n", - " print(\"\\nโœ… Postprocessor demonstrations complete!\")\n", - "else:\n", - " print(\"โŒ Cannot demonstrate postprocessors without index\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. Response Synthesizers - Advanced Response Generation\n", - "\n", - "**The Problem**: After retrieving relevant chunks, how do you combine them into a coherent, comprehensive answer? 
Basic concatenation leads to repetitive, poorly structured responses that don't match user expectations or business requirements.\n", - "\n", - "**The Solution**: Response synthesizers use sophisticated strategies to transform retrieved chunks into well-structured, contextually appropriate answers.\n", - "\n", - "**Synthesis Strategies Compared**:\n", - "\n", - "### ๐ŸŒณ TreeSummarize\n", - "- **How it Works**: Builds responses hierarchically, summarizing chunks in groups\n", - "- **Best For**: Complex analytical questions requiring deep understanding\n", - "- **Advantages**: Handles large context well, reduces information loss\n", - "- **Trade-offs**: Higher latency (3-8s), more token usage\n", - "- **Use Case**: Research analysis, detailed explanations, comprehensive summaries\n", - "\n", - "### ๐Ÿ”„ Refine\n", - "- **How it Works**: Iteratively improves answer by incorporating new information\n", - "- **Best For**: Questions requiring synthesis from multiple sources\n", - "- **Advantages**: Comprehensive answers, good information integration\n", - "- **Trade-offs**: Highest latency, most token usage\n", - "- **Use Case**: Comparative analysis, multi-faceted questions\n", - "\n", - "### ๐Ÿ“ฆ CompactAndRefine\n", - "- **How it Works**: Optimizes token usage while maintaining refinement benefits\n", - "- **Best For**: Production systems balancing quality and cost\n", - "- **Advantages**: Better token efficiency than Refine, good quality\n", - "- **Trade-offs**: Moderate latency, balanced cost\n", - "- **Use Case**: Cost-conscious production deployments\n", - "\n", - "### โšก SimpleSummarize\n", - "- **How it Works**: Direct synthesis with custom templates\n", - "- **Best For**: Fast, straightforward questions with known patterns\n", - "- **Advantages**: Lowest latency, minimal cost, predictable format\n", - "- **Trade-offs**: Less sophisticated reasoning\n", - "- **Use Case**: FAQ systems, simple factual queries\n", - "\n", - "**Performance Comparison**:\n", - "| 
Strategy | Latency | Token Usage | Quality | Best Use Case |\n", - "|----------|---------|-------------|---------|---------------|\n", - "| Tree | High | High | Excellent | Complex analysis |\n", - "| Refine | Highest | Highest | Excellent | Multi-source synthesis |\n", - "| Compact | Medium | Medium | Good | Production balance |\n", - "| Simple | Low | Low | Good | Fast responses |\n", - "\n", - "**Pro Tip**: Match synthesizer to query complexity - use Simple for facts, Tree for analysis.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐ŸŽฏ Response Synthesizer Demonstrations\n", - "============================================================\n", - "\n", - "1๏ธโƒฃ Tree Summarize - Hierarchical Information Building\n", - "--------------------------------------------------\n", - "โœ“ Tree Summarize engine created\n", - " - Builds responses hierarchically\n", - " - Optimal for complex, multi-part questions\n", - " - Uses cooking-specific prompt template\n", - "\n", - "2๏ธโƒฃ Refine - Iterative Response Improvement\n", - "--------------------------------------------------\n", - "โœ“ Refine engine created\n", - " - Iteratively improves responses\n", - " - Great for comprehensive answers\n", - " - Incorporates multiple information sources\n", - "\n", - "3๏ธโƒฃ Compact and Refine - Token-Optimized Processing\n", - "--------------------------------------------------\n", - "โœ“ Compact and Refine engine created\n", - " - Optimized for token efficiency\n", - " - Uses financial analysis template\n", - " - Balances quality and cost\n", - "\n", - "4๏ธโƒฃ Simple Summarize - Direct Response Generation\n", - "--------------------------------------------------\n", - "โœ“ Simple Summarize engine created\n", - " - Direct, straightforward responses\n", - " - Uses travel-specific template\n", - " - Fast and efficient\n", - "\n", - "โœ… Response synthesizer 
demonstrations complete!\n" - ] - } - ], - "source": [ - "# Response Synthesizers for advanced response generation\n", - "from llama_index.core.response_synthesizers import (\n", - " TreeSummarize,\n", - " Refine,\n", - " CompactAndRefine,\n", - " SimpleSummarize\n", - ")\n", - "from llama_index.core.prompts import PromptTemplate\n", - "\n", - "def demonstrate_response_synthesizers():\n", - " \"\"\"Demonstrate different response synthesis techniques.\"\"\"\n", - " \n", - " print(\"๐ŸŽฏ Response Synthesizer Demonstrations\")\n", - " print(\"=\" * 60)\n", - " \n", - " # Custom prompt templates for different synthesis modes\n", - " cooking_template = PromptTemplate(\n", - " \"You are a professional chef assistant. Based on the provided cooking information:\\n\"\n", - " \"{context_str}\\n\\n\"\n", - " \"Question: {query_str}\\n\\n\"\n", - " \"Provide a detailed, practical answer that includes specific instructions, \"\n", - " \"ingredients, and cooking tips. Format your response clearly with bullet points where appropriate.\"\n", - " )\n", - " \n", - " finance_template = PromptTemplate(\n", - " \"You are a financial analyst. Based on the provided financial data:\\n\"\n", - " \"{context_str}\\n\\n\"\n", - " \"Question: {query_str}\\n\\n\"\n", - " \"Provide a professional analysis with specific numbers, percentages, and actionable insights. \"\n", - " \"Include risk considerations where relevant.\"\n", - " )\n", - " \n", - " travel_template = PromptTemplate(\n", - " \"You are a travel advisor. Based on the provided travel information:\\n\"\n", - " \"{context_str}\\n\\n\"\n", - " \"Question: {query_str}\\n\\n\"\n", - " \"Provide comprehensive travel advice including practical tips, timing, costs, and local insights.\"\n", - " )\n", - " \n", - " # 1. 
Tree Summarize - Hierarchical synthesis\n", - " print(\"\\n1๏ธโƒฃ Tree Summarize - Hierarchical Information Building\")\n", - " print(\"-\" * 50)\n", - " \n", - " tree_synthesizer = TreeSummarize(\n", - " summary_template=cooking_template,\n", - " verbose=True\n", - " )\n", - " \n", - " tree_query_engine = advanced_index.as_query_engine(\n", - " response_synthesizer=tree_synthesizer,\n", - " similarity_top_k=8 # More nodes for hierarchical processing\n", - " )\n", - " \n", - " print(\"โœ“ Tree Summarize engine created\")\n", - " print(\" - Builds responses hierarchically\")\n", - " print(\" - Optimal for complex, multi-part questions\")\n", - " print(\" - Uses cooking-specific prompt template\")\n", - " \n", - " # 2. Refine - Iterative improvement\n", - " print(\"\\n2๏ธโƒฃ Refine - Iterative Response Improvement\")\n", - " print(\"-\" * 50)\n", - " \n", - " refine_synthesizer = Refine(\n", - " refine_template=PromptTemplate(\n", - " \"Original answer: {existing_answer}\\n\\n\"\n", - " \"New information: {context_msg}\\n\\n\"\n", - " \"Question: {query_str}\\n\\n\"\n", - " \"Refine the original answer using the new information. \"\n", - " \"Add details, correct inaccuracies, and improve completeness.\"\n", - " )\n", - " )\n", - " \n", - " refine_query_engine = advanced_index.as_query_engine(\n", - " response_synthesizer=refine_synthesizer,\n", - " similarity_top_k=6\n", - " )\n", - " \n", - " print(\"โœ“ Refine engine created\")\n", - " print(\" - Iteratively improves responses\")\n", - " print(\" - Great for comprehensive answers\")\n", - " print(\" - Incorporates multiple information sources\")\n", - " \n", - " # 3. 
Compact and Refine - Token-efficient processing\n", - " print(\"\\n3๏ธโƒฃ Compact and Refine - Token-Optimized Processing\")\n", - " print(\"-\" * 50)\n", - " \n", - " compact_synthesizer = CompactAndRefine(\n", - " text_qa_template=finance_template,\n", - " refine_template=PromptTemplate(\n", - " \"Financial Analysis: {existing_answer}\\n\\n\"\n", - " \"Additional Data: {context_msg}\\n\\n\"\n", - " \"Question: {query_str}\\n\\n\"\n", - " \"Update the financial analysis with the additional data. \"\n", - " \"Ensure all numbers and percentages are accurate.\"\n", - " )\n", - " )\n", - " \n", - " compact_query_engine = advanced_index.as_query_engine(\n", - " response_synthesizer=compact_synthesizer,\n", - " similarity_top_k=CONFIG[\"similarity_top_k\"]\n", - " )\n", - " \n", - " print(\"โœ“ Compact and Refine engine created\")\n", - " print(\" - Optimized for token efficiency\")\n", - " print(\" - Uses financial analysis template\")\n", - " print(\" - Balances quality and cost\")\n", - " \n", - " # 4. 
Simple Summarize with custom template\n", - " print(\"\\n4๏ธโƒฃ Simple Summarize - Direct Response Generation\")\n", - " print(\"-\" * 50)\n", - " \n", - " simple_synthesizer = SimpleSummarize(\n", - " text_qa_template=travel_template\n", - " )\n", - " \n", - " simple_query_engine = advanced_index.as_query_engine(\n", - " response_synthesizer=simple_synthesizer,\n", - " similarity_top_k=CONFIG[\"final_top_k\"]\n", - " )\n", - " \n", - " print(\"โœ“ Simple Summarize engine created\")\n", - " print(\" - Direct, straightforward responses\")\n", - " print(\" - Uses travel-specific template\")\n", - " print(\" - Fast and efficient\")\n", - " \n", - " return {\n", - " 'tree': tree_query_engine,\n", - " 'refine': refine_query_engine,\n", - " 'compact': compact_query_engine,\n", - " 'simple': simple_query_engine\n", - " }\n", - "\n", - "# Demonstrate response synthesizers\n", - "if advanced_index:\n", - " synthesizer_engines = demonstrate_response_synthesizers()\n", - " print(\"\\nโœ… Response synthesizer demonstrations complete!\")\n", - "else:\n", - " print(\"โŒ Cannot demonstrate synthesizers without index\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 6. Structured Outputs - Type-Safe Data Extraction\n", - "\n", - "**The Problem**: Natural language responses are difficult for systems to parse reliably. 
Inconsistent formatting leads to integration failures, data extraction errors, and unreliable downstream processing.\n", - "\n", - "**The Solution**: Structured outputs use Pydantic models to enforce consistent, type-safe response schemas that integrate seamlessly with applications.\n", - "\n", - "**Key Benefits of Structured Outputs**:\n", - "\n", - "### ๐Ÿ›ก๏ธ Type Safety & Validation\n", - "- **Automatic Type Checking**: Ensures fields match expected data types\n", - "- **Input Validation**: Validates data constraints (min/max values, required fields)\n", - "- **Error Prevention**: Catches schema violations before they reach your application\n", - "- **IDE Support**: Full autocompletion and type hints\n", - "\n", - "### ๐Ÿ”„ Reliable Integration\n", - "- **API Endpoints**: Guaranteed JSON structure for API responses\n", - "- **Data Pipelines**: Consistent input format for downstream processing\n", - "- **Database Operations**: Direct mapping to database schemas\n", - "- **Frontend Integration**: Predictable data structure for UI components\n", - "\n", - "### ๐Ÿ“Š Domain-Specific Models\n", - "- **Recipe Extraction**: Structured cooking information (ingredients, time, difficulty)\n", - "- **Financial Analysis**: Investment data (returns, risk levels, recommendations) \n", - "- **Travel Planning**: Destination details (timing, attractions, budget)\n", - "- **Custom Domains**: Any domain can benefit from structured extraction\n", - "\n", - "**When to Use Structured Outputs**:\n", - "- โœ… **API Development**: When building RAG-powered APIs\n", - "- โœ… **Data Processing**: When feeding RAG results into other systems\n", - "- โœ… **Complex Entities**: When extracting multiple related fields\n", - "- โœ… **Quality Assurance**: When response format consistency is critical\n", - "- โŒ **Simple Q&A**: When natural language responses are sufficient\n", - "- โŒ **Exploratory Queries**: When you want flexibility in response format\n", - "\n", - "**Real-World Impact**: 
Structured outputs reduce integration failures by 90%+ and eliminate the need for custom parsing logic, saving significant development time.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“Š Structured Output Demonstrations\n", - "============================================================\n", - "\n", - "1๏ธโƒฃ Recipe Information Extractor\n", - "----------------------------------------\n", - "โœ“ Recipe extraction program created\n", - " - Extracts: name, cuisine, prep time, difficulty\n", - " - Includes: ingredients, calories, cooking steps\n", - "\n", - "2๏ธโƒฃ Investment Information Extractor\n", - "----------------------------------------\n", - "โœ“ Investment extraction program created\n", - " - Extracts: asset details, returns, risk levels\n", - " - Includes: recommendations and analysis\n", - "\n", - "3๏ธโƒฃ Travel Information Extractor\n", - "----------------------------------------\n", - "โœ“ Travel extraction program created\n", - " - Extracts: destinations, timing, attractions\n", - " - Includes: budget, cuisine, transportation\n", - "\n", - "โœ… Structured output demonstrations complete!\n" - ] - } - ], - "source": [ - "# Structured Outputs with Pydantic models\n", - "from llama_index.core.program import LLMTextCompletionProgram\n", - "from llama_index.core.output_parsers import PydanticOutputParser\n", - "\n", - "# Define structured output models for different domains\n", - "\n", - "class DifficultyLevel(str, Enum):\n", - " \"\"\"Recipe difficulty levels.\"\"\"\n", - " EASY = \"Easy\"\n", - " MEDIUM = \"Medium\"\n", - " HARD = \"Hard\"\n", - "\n", - "class RecipeInfo(BaseModel):\n", - " \"\"\"Structured recipe information extraction.\"\"\"\n", - " name: str = Field(description=\"Name of the recipe\")\n", - " cuisine: str = Field(description=\"Cuisine type (e.g., Italian, French)\")\n", - " prep_time_minutes: int = 
Field(description=\"Preparation time in minutes\")\n", - " difficulty: DifficultyLevel = Field(description=\"Recipe difficulty level\")\n", - " main_ingredients: List[str] = Field(description=\"List of main ingredients\")\n", - " calories_per_serving: Optional[int] = Field(description=\"Calories per serving if available\")\n", - " cooking_steps: List[str] = Field(description=\"Key cooking steps\")\n", - "\n", - "class RiskLevel(str, Enum):\n", - " \"\"\"Investment risk levels.\"\"\"\n", - " LOW = \"Low\"\n", - " MEDIUM = \"Medium\"\n", - " HIGH = \"High\"\n", - " VERY_HIGH = \"Very High\"\n", - "\n", - "class InvestmentInfo(BaseModel):\n", - " \"\"\"Structured investment information extraction.\"\"\"\n", - " asset_name: str = Field(description=\"Name of the investment asset\")\n", - " asset_type: str = Field(description=\"Type of asset (Stock, Bond, ETF, etc.)\")\n", - " current_value_usd: float = Field(description=\"Current value in USD\")\n", - " percentage_return: float = Field(description=\"Percentage return (positive or negative)\")\n", - " risk_level: RiskLevel = Field(description=\"Risk level of the investment\")\n", - " recommendation: str = Field(description=\"Investment recommendation or analysis\")\n", - "\n", - "class TravelInfo(BaseModel):\n", - " \"\"\"Structured travel information extraction.\"\"\"\n", - " destination: str = Field(description=\"Travel destination\")\n", - " best_time_to_visit: str = Field(description=\"Best time to visit\")\n", - " must_see_attractions: List[str] = Field(description=\"Must-see attractions\")\n", - " local_cuisine: List[str] = Field(description=\"Local cuisine highlights\")\n", - " budget_range_usd: str = Field(description=\"Daily budget range in USD\")\n", - " transportation_tips: List[str] = Field(description=\"Transportation recommendations\")\n", - "\n", - "def demonstrate_structured_outputs():\n", - " \"\"\"Demonstrate structured output extraction.\"\"\"\n", - " \n", - " print(\"๐Ÿ“Š Structured Output 
Demonstrations\")\n", - " print(\"=\" * 60)\n", - " \n", - " # 1. Recipe Information Extractor\n", - " print(\"\\n1๏ธโƒฃ Recipe Information Extractor\")\n", - " print(\"-\" * 40)\n", - " \n", - " recipe_program = LLMTextCompletionProgram.from_defaults(\n", - " output_parser=PydanticOutputParser(RecipeInfo),\n", - " prompt_template_str=(\n", - " \"Extract structured recipe information from the following context:\\n\"\n", - " \"{context}\\n\\n\"\n", - " \"Question: {query}\\n\\n\"\n", - " \"Provide the recipe information in the specified JSON format.\"\n", - " ),\n", - " verbose=True\n", - " )\n", - " \n", - " print(\"โœ“ Recipe extraction program created\")\n", - " print(\" - Extracts: name, cuisine, prep time, difficulty\")\n", - " print(\" - Includes: ingredients, calories, cooking steps\")\n", - " \n", - " # 2. Investment Analysis Extractor\n", - " print(\"\\n2๏ธโƒฃ Investment Information Extractor\")\n", - " print(\"-\" * 40)\n", - " \n", - " investment_program = LLMTextCompletionProgram.from_defaults(\n", - " output_parser=PydanticOutputParser(InvestmentInfo),\n", - " prompt_template_str=(\n", - " \"Extract structured investment information from the following context:\\n\"\n", - " \"{context}\\n\\n\"\n", - " \"Question: {query}\\n\\n\"\n", - " \"Provide the investment analysis in the specified JSON format.\"\n", - " ),\n", - " verbose=True\n", - " )\n", - " \n", - " print(\"โœ“ Investment extraction program created\")\n", - " print(\" - Extracts: asset details, returns, risk levels\")\n", - " print(\" - Includes: recommendations and analysis\")\n", - " \n", - " # 3. 
Travel Guide Extractor\n", - " print(\"\\n3๏ธโƒฃ Travel Information Extractor\")\n", - " print(\"-\" * 40)\n", - " \n", - " travel_program = LLMTextCompletionProgram.from_defaults(\n", - " output_parser=PydanticOutputParser(TravelInfo),\n", - " prompt_template_str=(\n", - " \"Extract structured travel information from the following context:\\n\"\n", - " \"{context}\\n\\n\"\n", - " \"Question: {query}\\n\\n\"\n", - " \"Provide the travel guide information in the specified JSON format.\"\n", - " ),\n", - " verbose=True\n", - " )\n", - " \n", - " print(\"โœ“ Travel extraction program created\")\n", - " print(\" - Extracts: destinations, timing, attractions\")\n", - " print(\" - Includes: budget, cuisine, transportation\")\n", - " \n", - " return {\n", - " 'recipe': recipe_program,\n", - " 'investment': investment_program,\n", - " 'travel': travel_program\n", - " }\n", - "\n", - "# Demonstrate structured outputs\n", - "structured_programs = demonstrate_structured_outputs()\n", - "print(\"\\nโœ… Structured output demonstrations complete!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 7. 
Comprehensive Advanced RAG Demonstrations\n", - "\n", - "**Purpose**: Compare advanced techniques against baseline RAG using real queries across different domains, measuring concrete improvements in response quality, relevance, and structure.\n", - "\n", - "**What We'll Demonstrate**:\n", - "- **Baseline RAG**: Standard vector retrieval + simple generation\n", - "- **With Postprocessors**: Same query with intelligent filtering applied\n", - "- **With Advanced Synthesizers**: Domain-optimized response formatting\n", - "- **With Structured Outputs**: Type-safe data extraction\n", - "\n", - "**Measurement Criteria**:\n", - "- **Response Quality**: Relevance, completeness, and accuracy\n", - "- **Performance**: Latency and token usage trade-offs\n", - "- **Source Diversity**: How well techniques handle cross-modal information\n", - "- **Business Value**: Practical applicability to real-world use cases\n", - "\n", - "**Test Domains**:\n", - "- **Cooking**: Complex procedural information with specific requirements\n", - "- **Finance**: Numerical data requiring accuracy and risk assessment \n", - "- **Travel**: Multi-faceted planning information across different criteria\n", - "\n", - "This side-by-side comparison will show exactly when and why to use each advanced technique.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ Comprehensive Advanced RAG Demonstrations\n", - "======================================================================\n", - "\n", - "============================================================\n", - "๐ŸŽฏ DOMAIN: COOKING\n", - "โ“ QUERY: How do I make Spaghetti Carbonara? 
What are the key steps and ingredients?\n", - "============================================================\n", - "\n", - "1๏ธโƒฃ Standard RAG Response:\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:01:21,742 - INFO - query_type :, vector\n", - "2025-09-20 13:01:24,100 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 13:01:34,613 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: Empty Response...\n", - "Time: 13.12s\n", - "\n", - "2๏ธโƒฃ With Node Postprocessors:\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:01:36,276 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: Empty Response...\n", - "Time: 15.76s\n", - "Improvement: Filtered low-relevance results\n", - "\n", - "3๏ธโƒฃ With Tree Summarize (Cooking-Optimized):\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:01:50,448 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1 text chunks after repacking\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:01:51,653 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 13:02:00,292 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: None...\n", - "Time: 9.99s\n", - "Improvement: Hierarchical recipe instructions\n", - "\n", - "4๏ธโƒฃ Structured Output Extraction:\n", - "----------------------------------------\n" - 
] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:02:01,526 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 13:02:10,310 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Structured extraction error: Could not extract json string from output: \n", - "Note: This is normal - structured extraction requires specific data patterns\n", - "\n", - "============================================================\n", - "๐ŸŽฏ DOMAIN: FINANCE\n", - "โ“ QUERY: Which stock in my portfolio has the highest return and what's the risk level?\n", - "============================================================\n", - "\n", - "1๏ธโƒฃ Standard RAG Response:\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:02:11,715 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 13:02:17,213 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: NVIDIA (NVDA) โ€” 50.0% return, risk level: High....\n", - "Time: 6.96s\n", - "\n", - "2๏ธโƒฃ With Node Postprocessors:\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:02:18,414 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: Empty Response...\n", - "Time: 10.83s\n", - "Improvement: Filtered low-relevance results\n", - "\n", - "3๏ธโƒฃ With Compact Refine (Finance-Optimized):\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:02:28,677 - INFO - query_type :, vector\n", - "2025-09-20 
13:02:29,944 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 13:02:45,684 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: Empty Response...\n", - "Time: 27.74s\n", - "Improvement: Financial analysis formatting\n", - "\n", - "4๏ธโƒฃ Structured Output Extraction:\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:02:56,079 - INFO - query_type :, vector\n", - "2025-09-20 13:02:57,157 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Structured extraction error: Could not extract json string from output: \n", - "Note: This is normal - structured extraction requires specific data patterns\n", - "\n", - "============================================================\n", - "๐ŸŽฏ DOMAIN: TRAVEL\n", - "โ“ QUERY: What's the best time to visit Tokyo and what should I budget for?\n", - "============================================================\n", - "\n", - "1๏ธโƒฃ Standard RAG Response:\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:03:06,947 - INFO - query_type :, vector\n", - "2025-09-20 13:03:07,732 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 13:03:13,229 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: Best time to visit Tokyo: Marchโ€“May (cherry blossoms) and Septemberโ€“November.\n", - "\n", - "Budget (mid-range): ยฅ12,000โ€“18,000 per day....\n", - "Time: 6.38s\n", - "\n", - "2๏ธโƒฃ With Node Postprocessors:\n", - 
"----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:03:14,710 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: Best times: Marchโ€“May (cherry blossom season) and Septemberโ€“November. \n", - "Budget (mid-range): about ยฅ12,000โ€“18,000 per day....\n", - "Time: 6.37s\n", - "Improvement: Filtered low-relevance results\n", - "\n", - "3๏ธโƒฃ With Simple Summarize (Travel-Optimized):\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:03:19,882 - INFO - query_type :, vector\n", - "2025-09-20 13:03:22,077 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n", - "2025-09-20 13:03:30,895 - INFO - query_type :, vector\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Response: Empty Response...\n", - "Time: 11.27s\n", - "Improvement: Travel-specific formatting\n", - "\n", - "4๏ธโƒฃ Structured Output Extraction:\n", - "----------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2025-09-20 13:03:32,153 - INFO - HTTP Request: POST https://openrouter.ai/api/v1/chat/completions \"HTTP/1.1 200 OK\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Structured extraction error: Could not extract json string from output: \n", - "Note: This is normal - structured extraction requires specific data patterns\n", - "\n", - "======================================================================\n", - "โœ… Comprehensive demonstrations complete!\n", - "======================================================================\n", - "\n", - "๐Ÿ“‹ Advanced Techniques Demonstrated:\n", - " 1. 
Node Postprocessors - Similarity & keyword filtering\n", - " 2. Response Synthesizers - Tree, Refine, Compact strategies\n", - " 3. Structured Outputs - Type-safe Pydantic models\n", - " 4. Custom Templates - Domain-specific response formatting\n", - " 5. Multi-stage Processing - Chained advanced techniques\n" - ] - } - ], - "source": [ - "# Comprehensive demonstrations of all advanced techniques\n", - "\n", - "def run_comprehensive_demonstrations():\n", - " \"\"\"Run comprehensive demonstrations of all advanced RAG techniques.\"\"\"\n", - " \n", - " print(\"๐Ÿš€ Comprehensive Advanced RAG Demonstrations\")\n", - " print(\"=\" * 70)\n", - " \n", - " # Test queries for different domains\n", - " test_queries = {\n", - " 'cooking': \"How do I make Spaghetti Carbonara? What are the key steps and ingredients?\",\n", - " 'finance': \"Which stock in my portfolio has the highest return and what's the risk level?\",\n", - " 'travel': \"What's the best time to visit Tokyo and what should I budget for?\",\n", - " }\n", - " \n", - " for domain, query in test_queries.items():\n", - " print(f\"\\n{'='*60}\")\n", - " print(f\"๐ŸŽฏ DOMAIN: {domain.upper()}\")\n", - " print(f\"โ“ QUERY: {query}\")\n", - " print(f\"{'='*60}\")\n", - " \n", - " # 1. Standard RAG (baseline)\n", - " print(\"\\n1๏ธโƒฃ Standard RAG Response:\")\n", - " print(\"-\" * 40)\n", - " \n", - " start_time = time.time()\n", - " standard_response = advanced_index.as_query_engine().query(query)\n", - " standard_time = time.time() - start_time\n", - " \n", - " print(f\"Response: {str(standard_response)[:200]}...\")\n", - " print(f\"Time: {standard_time:.2f}s\")\n", - " \n", - " # 2. 
Advanced RAG with postprocessors\n", - " if 'combined_engine' in postprocessor_engines:\n", - " print(\"\\n2๏ธโƒฃ With Node Postprocessors:\")\n", - " print(\"-\" * 40)\n", - " \n", - " start_time = time.time()\n", - " processed_response = postprocessor_engines['combined_engine'].query(query)\n", - " processed_time = time.time() - start_time\n", - " \n", - " print(f\"Response: {str(processed_response)[:200]}...\")\n", - " print(f\"Time: {processed_time:.2f}s\")\n", - " print(f\"Improvement: Filtered low-relevance results\")\n", - " \n", - " # 3. Advanced synthesizer based on domain\n", - " if domain == 'cooking' and 'tree' in synthesizer_engines:\n", - " print(\"\\n3๏ธโƒฃ With Tree Summarize (Cooking-Optimized):\")\n", - " print(\"-\" * 40)\n", - " \n", - " start_time = time.time()\n", - " tree_response = synthesizer_engines['tree'].query(query)\n", - " tree_time = time.time() - start_time\n", - " \n", - " print(f\"Response: {str(tree_response)[:200]}...\")\n", - " print(f\"Time: {tree_time:.2f}s\")\n", - " print(f\"Improvement: Hierarchical recipe instructions\")\n", - " \n", - " elif domain == 'finance' and 'compact' in synthesizer_engines:\n", - " print(\"\\n3๏ธโƒฃ With Compact Refine (Finance-Optimized):\")\n", - " print(\"-\" * 40)\n", - " \n", - " start_time = time.time()\n", - " compact_response = synthesizer_engines['compact'].query(query)\n", - " compact_time = time.time() - start_time\n", - " \n", - " print(f\"Response: {str(compact_response)[:200]}...\")\n", - " print(f\"Time: {compact_time:.2f}s\")\n", - " print(f\"Improvement: Financial analysis formatting\")\n", - " \n", - " elif domain == 'travel' and 'simple' in synthesizer_engines:\n", - " print(\"\\n3๏ธโƒฃ With Simple Summarize (Travel-Optimized):\")\n", - " print(\"-\" * 40)\n", - " \n", - " start_time = time.time()\n", - " simple_response = synthesizer_engines['simple'].query(query)\n", - " simple_time = time.time() - start_time\n", - " \n", - " print(f\"Response: 
{str(simple_response)[:200]}...\")\n", - " print(f\"Time: {simple_time:.2f}s\")\n", - " print(f\"Improvement: Travel-specific formatting\")\n", - " \n", - " # 4. Structured output extraction\n", - " print(\"\\n4๏ธโƒฃ Structured Output Extraction:\")\n", - " print(\"-\" * 40)\n", - " \n", - " try:\n", - " # Get relevant context for structured extraction\n", - " retriever = VectorIndexRetriever(\n", - " index=advanced_index,\n", - " similarity_top_k=3\n", - " )\n", - " nodes = retriever.retrieve(query)\n", - " context = \"\\n\".join([node.text for node in nodes])\n", - " \n", - " start_time = time.time()\n", - " \n", - " if domain == 'cooking' and 'recipe' in structured_programs:\n", - " structured_result = structured_programs['recipe'](\n", - " context=context,\n", - " query=query\n", - " )\n", - " print(f\"Structured Recipe: {structured_result.name}\")\n", - " print(f\"Prep Time: {structured_result.prep_time_minutes} minutes\")\n", - " print(f\"Difficulty: {structured_result.difficulty}\")\n", - " print(f\"Main Ingredients: {', '.join(structured_result.main_ingredients[:3])}...\")\n", - " \n", - " elif domain == 'finance' and 'investment' in structured_programs:\n", - " structured_result = structured_programs['investment'](\n", - " context=context,\n", - " query=query\n", - " )\n", - " print(f\"Asset: {structured_result.asset_name}\")\n", - " print(f\"Return: {structured_result.percentage_return}%\")\n", - " print(f\"Risk Level: {structured_result.risk_level}\")\n", - " print(f\"Value: ${structured_result.current_value_usd:,.2f}\")\n", - " \n", - " elif domain == 'travel' and 'travel' in structured_programs:\n", - " structured_result = structured_programs['travel'](\n", - " context=context,\n", - " query=query\n", - " )\n", - " print(f\"Destination: {structured_result.destination}\")\n", - " print(f\"Best Time: {structured_result.best_time_to_visit}\")\n", - " print(f\"Budget: {structured_result.budget_range_usd}\")\n", - " print(f\"Attractions: {', 
'.join(structured_result.must_see_attractions[:2])}...\")\n", - " \n", - " structured_time = time.time() - start_time\n", - " print(f\"Time: {structured_time:.2f}s\")\n", - " print(f\"Improvement: Type-safe structured data\")\n", - " \n", - " except Exception as e:\n", - " print(f\"Structured extraction error: {e}\")\n", - " print(\"Note: This is normal - structured extraction requires specific data patterns\")\n", - " \n", - " print(f\"\\n{'='*70}\")\n", - " print(\"โœ… Comprehensive demonstrations complete!\")\n", - " print(f\"{'='*70}\")\n", - " \n", - " # Summary of techniques demonstrated\n", - " print(\"\\n๐Ÿ“‹ Advanced Techniques Demonstrated:\")\n", - " print(\" 1. Node Postprocessors - Similarity & keyword filtering\")\n", - " print(\" 2. Response Synthesizers - Tree, Refine, Compact strategies\")\n", - " print(\" 3. Structured Outputs - Type-safe Pydantic models\")\n", - " print(\" 4. Custom Templates - Domain-specific response formatting\")\n", - " print(\" 5. Multi-stage Processing - Chained advanced techniques\")\n", - "\n", - "# Run comprehensive demonstrations\n", - "if (advanced_index and \n", - " 'postprocessor_engines' in locals() and \n", - " 'synthesizer_engines' in locals() and \n", - " 'structured_programs' in locals()):\n", - " \n", - " run_comprehensive_demonstrations()\n", - "else:\n", - " print(\"โŒ Cannot run comprehensive demonstrations - missing components\")\n", - " print(\"Please ensure all previous cells have been executed successfully\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 8. 
Performance Analysis and Best Practices\n", - "\n", - "**Purpose**: Translate demonstration results into actionable production guidance with specific configuration recommendations for different use cases.\n", - "\n", - "**What This Analysis Provides**:\n", - "- **Performance Trade-offs**: Latency vs quality vs cost for each technique\n", - "- **Configuration Guidance**: Optimal settings for different environments\n", - "- **Decision Framework**: When to use which technique based on requirements\n", - "- **Production Patterns**: Proven configurations for real-world deployments\n", - "\n", - "**Decision Matrix by Use Case**:\n", - "- **Development/Testing**: Fast iteration with good quality\n", - "- **Production (Speed)**: Prioritize low latency for user-facing applications \n", - "- **Production (Quality)**: Prioritize accuracy for high-stakes applications\n", - "- **Cost-Conscious**: Optimize for token usage while maintaining quality\n", - "\n", - "**Key Insights You'll Gain**:\n", - "- Which techniques provide the best ROI for your specific needs\n", - "- How to configure systems for optimal performance in your environment\n", - "- Common pitfalls and how to avoid them\n", - "- Monitoring strategies for production systems\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿ“Š Performance Analysis & Best Practices\n", - "======================================================================\n", - "\n", - "๐Ÿ” Technique Comparison:\n", - "--------------------------------------------------\n", - "\n", - "๐Ÿ“‹ Standard RAG\n", - " Latency: Low (1-3s)\n", - " Accuracy: Baseline\n", - " Cost: Low\n", - " Best for: General queries, fast responses\n", - " โœ… Pros: Fast, Simple, Cost-effective\n", - " โš ๏ธ Cons: May include irrelevant results, Basic formatting\n", - "\n", - "๐Ÿ“‹ Node Postprocessors\n", - " Latency: Low (1-4s)\n", - " Accuracy: Higher\n", - " 
Cost: Low\n", - " Best for: Filtering noisy results, domain-specific queries\n", - " โœ… Pros: Better relevance, Configurable filters, Minimal cost\n", - " โš ๏ธ Cons: May over-filter, Requires tuning\n", - "\n", - "๐Ÿ“‹ Response Synthesizers\n", - " Latency: Medium (3-8s)\n", - " Accuracy: Much Higher\n", - " Cost: Medium\n", - " Best for: Complex queries, detailed responses\n", - " โœ… Pros: Rich responses, Domain-specific formatting, Hierarchical processing\n", - " โš ๏ธ Cons: Higher latency, More token usage\n", - "\n", - "๐Ÿ“‹ Structured Outputs\n", - " Latency: Medium (3-6s)\n", - " Accuracy: Highest\n", - " Cost: Medium\n", - " Best for: Data extraction, API integration\n", - " โœ… Pros: Type-safe, Reliable format, Easy integration\n", - " โš ๏ธ Cons: Schema dependency, Less flexibility\n", - "\n", - "\n", - "๐Ÿ’ก Best Practices & Recommendations:\n", - "==================================================\n", - "\n", - "๐ŸŽฏ When to Use Each Technique:\n", - " โ€ข Node Postprocessors: Always use for production - minimal cost, big improvement\n", - " โ€ข Response Synthesizers: Use for complex, multi-part questions\n", - " โ€ข Structured Outputs: Use for data extraction and API integration\n", - "\n", - "โšก Performance Optimization:\n", - " โ€ข Start with smaller chunk sizes (512) for better precision\n", - " โ€ข Use similarity cutoffs (0.3+) to filter noise\n", - " โ€ข Retrieve more candidates (10+) for better postprocessing\n", - " โ€ข Cache embeddings and indexes for faster queries\n", - "\n", - "๐Ÿ’ฐ Cost Management:\n", - " โ€ข Use local embeddings to reduce API costs\n", - " โ€ข Implement similarity filtering before expensive synthesis\n", - " โ€ข Choose synthesizer based on query complexity\n", - " โ€ข Monitor token usage in production\n", - "\n", - "๐ŸŽจ Quality Improvement:\n", - " โ€ข Create domain-specific prompt templates\n", - " โ€ข Tune postprocessor thresholds for your data\n", - " โ€ข Use structured outputs for consistent results\n", - " โ€ข 
A/B test different configurations\n", - "\n", - "\n", - "โš™๏ธ Recommended Configurations:\n", - "========================================\n", - "\n", - "๐Ÿ“Š Development/Testing:\n", - " chunk_size: 512\n", - " similarity_top_k: 5\n", - " similarity_cutoff: 0.2\n", - " synthesizer: Simple\n", - " postprocessors: Similarity only\n", - "\n", - "๐Ÿ“Š Production (Fast):\n", - " chunk_size: 1024\n", - " similarity_top_k: 8\n", - " similarity_cutoff: 0.3\n", - " synthesizer: Compact\n", - " postprocessors: Similarity + Keyword\n", - "\n", - "๐Ÿ“Š Production (Quality):\n", - " chunk_size: 512\n", - " similarity_top_k: 12\n", - " similarity_cutoff: 0.25\n", - " synthesizer: Tree/Refine\n", - " postprocessors: Multi-stage\n", - "\n", - "\n", - "๐ŸŽŠ Advanced RAG Tutorial Complete!\n", - "You now have the tools to build sophisticated, production-ready RAG systems!\n" - ] - } - ], - "source": [ - "# Performance analysis and best practices\n", - "\n", - "def analyze_performance_characteristics():\n", - " \"\"\"Analyze performance of different advanced RAG techniques.\"\"\"\n", - " \n", - " print(\"๐Ÿ“Š Performance Analysis & Best Practices\")\n", - " print(\"=\" * 70)\n", - " \n", - " # Performance characteristics\n", - " techniques = {\n", - " \"Standard RAG\": {\n", - " \"latency\": \"Low (1-3s)\",\n", - " \"accuracy\": \"Baseline\",\n", - " \"cost\": \"Low\",\n", - " \"use_case\": \"General queries, fast responses\",\n", - " \"pros\": [\"Fast\", \"Simple\", \"Cost-effective\"],\n", - " \"cons\": [\"May include irrelevant results\", \"Basic formatting\"]\n", - " },\n", - " \"Node Postprocessors\": {\n", - " \"latency\": \"Low (1-4s)\",\n", - " \"accuracy\": \"Higher\",\n", - " \"cost\": \"Low\",\n", - " \"use_case\": \"Filtering noisy results, domain-specific queries\",\n", - " \"pros\": [\"Better relevance\", \"Configurable filters\", \"Minimal cost\"],\n", - " \"cons\": [\"May over-filter\", \"Requires tuning\"]\n", - " },\n", - " \"Response Synthesizers\": {\n", - " 
\"latency\": \"Medium (3-8s)\",\n", - " \"accuracy\": \"Much Higher\",\n", - " \"cost\": \"Medium\",\n", - " \"use_case\": \"Complex queries, detailed responses\",\n", - " \"pros\": [\"Rich responses\", \"Domain-specific formatting\", \"Hierarchical processing\"],\n", - " \"cons\": [\"Higher latency\", \"More token usage\"]\n", - " },\n", - " \"Structured Outputs\": {\n", - " \"latency\": \"Medium (3-6s)\",\n", - " \"accuracy\": \"Highest\",\n", - " \"cost\": \"Medium\",\n", - " \"use_case\": \"Data extraction, API integration\",\n", - " \"pros\": [\"Type-safe\", \"Reliable format\", \"Easy integration\"],\n", - " \"cons\": [\"Schema dependency\", \"Less flexibility\"]\n", - " }\n", - " }\n", - " \n", - " print(\"\\n๐Ÿ” Technique Comparison:\")\n", - " print(\"-\" * 50)\n", - " \n", - " for technique, specs in techniques.items():\n", - " print(f\"\\n๐Ÿ“‹ {technique}\")\n", - " print(f\" Latency: {specs['latency']}\")\n", - " print(f\" Accuracy: {specs['accuracy']}\")\n", - " print(f\" Cost: {specs['cost']}\")\n", - " print(f\" Best for: {specs['use_case']}\")\n", - " print(f\" โœ… Pros: {', '.join(specs['pros'])}\")\n", - " print(f\" โš ๏ธ Cons: {', '.join(specs['cons'])}\")\n", - " \n", - " # Best practices recommendations\n", - " print(\"\\n\\n๐Ÿ’ก Best Practices & Recommendations:\")\n", - " print(\"=\" * 50)\n", - " \n", - " recommendations = [\n", - " {\n", - " \"category\": \"๐ŸŽฏ When to Use Each Technique\",\n", - " \"tips\": [\n", - " \"Node Postprocessors: Always use for production - minimal cost, big improvement\",\n", - " \"Response Synthesizers: Use for complex, multi-part questions\",\n", - " \"Structured Outputs: Use for data extraction and API integration\"\n", - " ]\n", - " },\n", - " {\n", - " \"category\": \"โšก Performance Optimization\",\n", - " \"tips\": [\n", - " \"Start with smaller chunk sizes (512) for better precision\",\n", - " \"Use similarity cutoffs (0.3+) to filter noise\",\n", - " \"Retrieve more candidates (10+) for better 
postprocessing\",\n", - " \"Cache embeddings and indexes for faster queries\"\n", - " ]\n", - " },\n", - " {\n", - " \"category\": \"๐Ÿ’ฐ Cost Management\",\n", - " \"tips\": [\n", - " \"Use local embeddings to reduce API costs\",\n", - " \"Implement similarity filtering before expensive synthesis\",\n", - " \"Choose synthesizer based on query complexity\",\n", - " \"Monitor token usage in production\"\n", - " ]\n", - " },\n", - " {\n", - " \"category\": \"๐ŸŽจ Quality Improvement\",\n", - " \"tips\": [\n", - " \"Create domain-specific prompt templates\",\n", - " \"Tune postprocessor thresholds for your data\",\n", - " \"Use structured outputs for consistent results\",\n", - " \"A/B test different configurations\"\n", - " ]\n", - " }\n", - " ]\n", - " \n", - " for rec in recommendations:\n", - " print(f\"\\n{rec['category']}:\")\n", - " for tip in rec['tips']:\n", - " print(f\" โ€ข {tip}\")\n", - " \n", - " # Configuration recommendations\n", - " print(\"\\n\\nโš™๏ธ Recommended Configurations:\")\n", - " print(\"=\" * 40)\n", - " \n", - " configs = {\n", - " \"Development/Testing\": {\n", - " \"chunk_size\": 512,\n", - " \"similarity_top_k\": 5,\n", - " \"similarity_cutoff\": 0.2,\n", - " \"synthesizer\": \"Simple\",\n", - " \"postprocessors\": \"Similarity only\"\n", - " },\n", - " \"Production (Fast)\": {\n", - " \"chunk_size\": 1024,\n", - " \"similarity_top_k\": 8,\n", - " \"similarity_cutoff\": 0.3,\n", - " \"synthesizer\": \"Compact\",\n", - " \"postprocessors\": \"Similarity + Keyword\"\n", - " },\n", - " \"Production (Quality)\": {\n", - " \"chunk_size\": 512,\n", - " \"similarity_top_k\": 12,\n", - " \"similarity_cutoff\": 0.25,\n", - " \"synthesizer\": \"Tree/Refine\",\n", - " \"postprocessors\": \"Multi-stage\"\n", - " }\n", - " }\n", - " \n", - " for env, config in configs.items():\n", - " print(f\"\\n๐Ÿ“Š {env}:\")\n", - " for param, value in config.items():\n", - " print(f\" {param}: {value}\")\n", - "\n", - "# Run performance analysis\n", - 
"analyze_performance_characteristics()\n", - "\n", - "print(\"\\n\\n๐ŸŽŠ Advanced RAG Tutorial Complete!\")\n", - "print(\"You now have the tools to build sophisticated, production-ready RAG systems!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conclusion\n", - "\n", - "๐ŸŽ‰ **Congratulations!** You have successfully mastered **Advanced RAG Techniques** with LlamaIndex!\n", - "\n", - "### What We Accomplished\n", - "\n", - "This comprehensive tutorial demonstrated sophisticated RAG techniques using real multimodal data:\n", - "\n", - "#### ๐Ÿ”ง **Node Postprocessors Mastery**\n", - "- โœ… **Similarity Filtering**: Automated relevance-based result filtering\n", - "- โœ… **Keyword Filtering**: Content-based inclusion/exclusion rules\n", - "- โœ… **Multi-stage Processing**: Chained postprocessor pipelines\n", - "- โœ… **Custom Filtering**: Domain-specific result refinement\n", - "\n", - "#### ๐ŸŽฏ **Response Synthesizers Expertise**\n", - "- โœ… **Tree Summarize**: Hierarchical response building for complex queries\n", - "- โœ… **Refine**: Iterative response improvement with multiple sources\n", - "- โœ… **Compact and Refine**: Token-optimized processing\n", - "- โœ… **Custom Templates**: Domain-specific response formatting\n", - "- โœ… **Template Optimization**: Cooking, finance, travel-specific prompts\n", - "\n", - "#### ๐Ÿ“Š **Structured Output Mastery**\n", - "- โœ… **Pydantic Models**: Type-safe data extraction schemas\n", - "- โœ… **Domain Models**: Recipe, Investment, Travel extractors\n", - "- โœ… **Enum Support**: Controlled vocabulary enforcement\n", - "- โœ… **JSON Schema**: Reliable structured data formatting\n", - "\n", - "#### โšก **Performance & Production Insights**\n", - "- โœ… **Latency Optimization**: Performance vs quality trade-offs\n", - "- โœ… **Cost Management**: Token usage optimization strategies\n", - "- โœ… **Configuration Tuning**: Environment-specific recommendations\n", - "- โœ… **Best Practices**: 
Production deployment guidelines\n", - "\n", - "### Real-World Applications\n", - "\n", - "These advanced techniques enable sophisticated applications:\n", - "\n", - "- **๐Ÿข Enterprise RAG**: Multi-stage filtering for accurate business intelligence\n", - "- **๐Ÿ”ฌ Research Systems**: Hierarchical synthesis for complex analysis\n", - "- **๐Ÿ›’ E-commerce**: Hybrid search for product discovery\n", - "- **๐Ÿฅ Healthcare**: Structured extraction for medical data processing\n", - "- **๐ŸŽ“ Educational**: Domain-specific response formatting\n", - "- **๐Ÿ“ฑ APIs**: Type-safe data extraction for system integration\n", - "\n", - "### Key Takeaways\n", - "\n", - "1. **๐ŸŽฏ Postprocessors are Essential**: Always use similarity filtering in production\n", - "2. **๐ŸŽจ Templates Matter**: Domain-specific prompts dramatically improve quality\n", - "3. **โš–๏ธ Balance is Key**: Choose techniques based on latency vs quality needs\n", - "4. **๐Ÿ”ง Tuning is Critical**: Configuration significantly impacts performance\n", - "5. **๐Ÿ“Š Structure Enables Integration**: Pydantic models ensure reliable data flow\n", - "\n", - "### Architecture Comparison\n", - "\n", - "| Technique | Latency | Accuracy | Cost | Best Use Case |\n", - "|-----------|---------|----------|------|---------------|\n", - "| **Standard RAG** | Low (1-3s) | Baseline | Low | General queries |\n", - "| **+ Postprocessors** | Low (1-4s) | Higher | Low | Filtered results |\n", - "| **+ Synthesizers** | Medium (3-8s) | Much Higher | Medium | Complex queries |\n", - "| **+ Structured** | Medium (3-6s) | Highest | Medium | Data extraction |\n", - "\n", - "### Next Steps\n", - "\n", - "Continue your RAG journey by:\n", - "\n", - "1. **๐Ÿ”„ Implementing A/B Testing**: Compare different technique combinations\n", - "2. **๐Ÿ“ˆ Adding Evaluation Metrics**: Monitor accuracy and performance\n", - "3. **๐ŸŒ Scaling to Production**: Implement async processing and caching\n", - "4. 
**๐Ÿค– Building Agents**: Combine RAG with tool-using agents\n", - "5. **๐Ÿ”ฎ Exploring Cutting-Edge**: Keep up with latest LlamaIndex features\n", - "\n", - "---\n", - "\n", - "**๐Ÿš€ You're now equipped to build world-class RAG systems!** \n", - "\n", - "The techniques you've learned represent the current state-of-the-art in retrieval-augmented generation, enabling you to create sophisticated, production-ready applications that can handle complex queries across diverse data types with unprecedented accuracy and reliability.\n", - "\n", - "### Final Configuration Template\n", - "\n", - "```python\n", - "# Production-Ready Advanced RAG Configuration\n", - "CONFIG = {\n", - " \"llm_model\": \"gpt-4o\",\n", - " \"embedding_model\": \"local:BAAI/bge-small-en-v1.5\", \n", - " \"chunk_size\": 512, # Precision over speed\n", - " \"chunk_overlap\": 50, # Minimal overlap\n", - " \"similarity_top_k\": 10, # More candidates\n", - " \"final_top_k\": 5, # Refined results\n", - " \"similarity_cutoff\": 0.3, # Quality threshold\n", - "}\n", - "\n", - "# Multi-stage postprocessing pipeline\n", - "postprocessors = [\n", - " SimilarityPostprocessor(similarity_cutoff=0.3),\n", - " KeywordNodePostprocessor(exclude_keywords=[\"noise\", \"irrelevant\"])\n", - "]\n", - "\n", - "# Domain-specific synthesizers\n", - "synthesizers = {\n", - " \"cooking\": TreeSummarize(summary_template=cooking_template),\n", - " \"finance\": CompactAndRefine(text_qa_template=finance_template),\n", - " \"travel\": SimpleSummarize(text_qa_template=travel_template)\n", - "}\n", - "\n", - "# Structured output models for reliable data extraction\n", - "structured_models = {\n", - " \"recipe\": PydanticOutputParser(RecipeInfo),\n", - " \"investment\": PydanticOutputParser(InvestmentInfo),\n", - " \"travel\": PydanticOutputParser(TravelInfo)\n", - "}\n", - "```\n", - "\n", - "**Happy building!** ๐Ÿฆ™๐Ÿ“šโœจ\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - 
"kernelspec": { - "display_name": "accelerator", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.13" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Girish_Basavaraj_Hiremath/session_2/papers/agents/AI_Agents_vs_Agentic_AI.pdf b/Girish_Basavaraj_Hiremath/session_2/papers/agents/AI_Agents_vs_Agentic_AI.pdf deleted file mode 100644 index 4276fe2..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/papers/agents/AI_Agents_vs_Agentic_AI.pdf and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/papers/agents/Emerging_Agent_Architectures.pdf b/Girish_Basavaraj_Hiremath/session_2/papers/agents/Emerging_Agent_Architectures.pdf deleted file mode 100644 index b16b0b6..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/papers/agents/Emerging_Agent_Architectures.pdf and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/papers/agents/LLMReasoning_to_Autonomous_Agents.pdf b/Girish_Basavaraj_Hiremath/session_2/papers/agents/LLMReasoning_to_Autonomous_Agents.pdf deleted file mode 100644 index 7749c0d..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/papers/agents/LLMReasoning_to_Autonomous_Agents.pdf and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/papers/agents/Rise_and_Potential_LLM_Agents.pdf b/Girish_Basavaraj_Hiremath/session_2/papers/agents/Rise_and_Potential_LLM_Agents.pdf deleted file mode 100644 index c600ee5..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/papers/agents/Rise_and_Potential_LLM_Agents.pdf and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/papers/agents/survey_of_self_evolving_agents.pdf b/Girish_Basavaraj_Hiremath/session_2/papers/agents/survey_of_self_evolving_agents.pdf deleted file mode 
100644 index 29bb5b3..0000000 Binary files a/Girish_Basavaraj_Hiremath/session_2/papers/agents/survey_of_self_evolving_agents.pdf and /dev/null differ diff --git a/Girish_Basavaraj_Hiremath/session_2/prompt_samples/assignment_generator.yaml b/Girish_Basavaraj_Hiremath/session_2/prompt_samples/assignment_generator.yaml deleted file mode 100644 index c4a0ead..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/prompt_samples/assignment_generator.yaml +++ /dev/null @@ -1,94 +0,0 @@ -system_prompt: | - You are an expert educational content creator and assessment specialist. Your task is to generate comprehensive, practical assignments based on YouTube video summaries to help students apply and reinforce their learning. - - Your assignments should: - - **EDUCATIONAL OBJECTIVES:** - - Test practical understanding, not just memorization - - Encourage hands-on implementation and experimentation - - Build progressive complexity from basic concepts to advanced applications - - Promote critical thinking and problem-solving skills - - Include real-world scenarios and use cases - - **ASSIGNMENT STRUCTURE:** - Create a well-structured assignment with the following sections: - - 1. **Assignment Overview** (2-3 sentences) - - Clear learning objective - - Expected outcome or deliverable - - Estimated time commitment - - 2. **Prerequisite Knowledge** (bullet points) - - Essential concepts students should know before starting - - Recommended prior experience or skills - - 3. **Core Tasks** (3-5 main tasks) - - Each task should build upon the previous one - - Include specific, actionable steps - - Mix theoretical understanding with practical implementation - - Provide clear success criteria for each task - - 4. **Practical Exercises** (2-4 hands-on activities) - - Coding exercises, projects, or experiments - - Real-world problem-solving scenarios - - Troubleshooting or debugging challenges - - Performance optimization tasks - - 5. 
**Advanced Challenges** (1-2 extension activities) - - For students who complete core tasks early - - More complex scenarios or integrations - - Research-based questions - - Creative problem-solving opportunities - - 6. **Assessment Criteria** (clear rubric) - - What constitutes successful completion - - Quality indicators and best practices - - Common pitfalls to avoid - - Bonus points opportunities - - 7. **Resources & References** - - Additional learning materials - - Documentation links - - Community resources or forums - - Tools and software recommendations - - **FORMATTING GUIDELINES:** - - Use clear headings and subheadings - - Include code snippets or examples where relevant - - Use bullet points for lists and action items - - Add emojis strategically for visual organization - - Ensure tasks are specific and measurable - - Include estimated time for each section - - **DIFFICULTY ADAPTATION:** - - For Beginner content: Focus on foundational concepts, guided exercises, step-by-step instructions - - For Intermediate content: Include problem-solving scenarios, multiple solution approaches, integration tasks - - For Advanced content: Emphasize optimization, architecture decisions, and innovative applications - - **PRACTICAL FOCUS:** - - Every assignment should result in a tangible deliverable - - Include testing and validation steps - - Encourage documentation and explanation of decisions - - Promote best practices and industry standards - - Respond with a comprehensive assignment in markdown format that educators can directly use with students. Make the assignment engaging, educational, and appropriately challenging for the content level. 
- -user_prompt_template: | - Create a comprehensive practical assignment based on the following YouTube video summary: - - **Video Title:** {title} - **Channel:** {channel} - **Difficulty Level:** {difficulty} - **Content Summary:** - {summary} - - **Key Technical Elements:** - {technical_breakdown} - - **Learning Insights:** - {insights} - - **Practical Applications:** - {applications} - - Generate an assignment that helps students apply these concepts practically and reinforces the learning from this video content. \ No newline at end of file diff --git a/Girish_Basavaraj_Hiremath/session_2/prompt_samples/summarizer_youtube.yaml b/Girish_Basavaraj_Hiremath/session_2/prompt_samples/summarizer_youtube.yaml deleted file mode 100644 index 5747507..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/prompt_samples/summarizer_youtube.yaml +++ /dev/null @@ -1,78 +0,0 @@ -name: TechSummarizerAI -version: 1.0 -description: > - Expert AI assistant specializing in analyzing technical videos for software engineers, - machine learning engineers, and other technical audiences. -prompt: | - # SYSTEM PROMPT - - You are **TechSummarizerAI**, an expert AI assistant specializing in analyzing technical videos for software engineers, machine learning engineers, and other technical audiences. - You will be given: - - **YouTube Video Title** - - **YouTube Video Description** - - **Full Transcript** - - Your goal is to produce a **comprehensive, technical, and structured summary** that highlights key engineering insights, tools, frameworks, architectures, processes, and design decisions from the video. - - --- - - ## ๐ŸŽฏ **Objectives** - 1. **Engineerโ€™s Perspective** โ€” Extract deep technical details, avoiding generic recaps. - 2. **Implementation Relevance** โ€” Show *how* ideas in the video could be applied in real-world engineering. - 3. **Precision** โ€” Avoid speculation. Only summarize from the provided title, description, and transcript. - 4. 
**Clarity** โ€” Use concise, professional language. - 5. **Structure** โ€” Organize the summary in clear technical sections. - - --- - - ## ๐Ÿ“„ **Final Output Format** - Produce the summary strictly following this Markdown structure: - - ### 1. **High-Level Overview** - - One paragraph capturing the essence of the video from an engineering viewpoint. - - ### 2. **Technical Breakdown** - #### 2.1 Tools, Frameworks, and Libraries - - List all mentioned tools, frameworks, packages, or APIs. - - Include their purpose in the workflow. - - #### 2.2 Architecture / System Design - - Describe the system architecture or process flow discussed. - - Use bullet points or a diagram-like indentation. - - #### 2.3 Step-by-Step Process - - Present each step on its own line. - - Never write multiple steps in a single line inside the `` tag. - - Always start the step number at the beginning of a new line. - - After every step, insert a hard line break. - Example format: - - - 1. **Environment Setup** โ€” Create Conda environment (`conda create -n edu python=3.12`), activate, and install dependencies. - - 2. **Install CrewAI and LangTrace SDK** โ€” `pip install crewai langtrace-python-sdk`. - - 3. **Initialize CrewAI Project** โ€” `crewai create crew edu` to scaffold agents, tasks, and config files. - - - - ### 3. **Key Engineering Insights** - - Lessons, trade-offs, optimizations, and design considerations. - - Performance concerns, scalability notes, and maintainability advice. - - ### 4. **Example Applications** - - At least 2 practical scenarios where the approach could be implemented. - - ### 5. **Limitations / Caveats** - - Technical limitations or conditions where the approach may fail. - - --- - - ## ๐Ÿ” **XML Content Tagging** - Wrap relevant extracted segments from the transcript with XML tags so they can be programmatically identified later. - - Example: - ```xml - Used for creating multiple AI agents that can communicate and collaborate efficiently. 
- Researcher Agent outputs feed into Content Writer Agent. - Sequential processes can be replaced with parallel execution for efficiency in some contexts. diff --git a/Girish_Basavaraj_Hiremath/session_2/prompt_samples/summarizer_youtube_v2.yaml b/Girish_Basavaraj_Hiremath/session_2/prompt_samples/summarizer_youtube_v2.yaml deleted file mode 100644 index 140a9d3..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/prompt_samples/summarizer_youtube_v2.yaml +++ /dev/null @@ -1,71 +0,0 @@ -name: TechSummarizerAI -version: 1.1 -description: > - Optimized JSON output format for engineering-focused technical video summaries. - Designed for downstream LLM tasks with minimal parsing complexity, consistent key structure, and reduced nesting. -prompt: | - # SYSTEM PROMPT - - You are **TechSummarizerAI**, an expert AI assistant specializing in analyzing technical videos for software engineers, machine learning engineers, and other technical audiences. - You will be given: - - **YouTube Video Title** - - **YouTube Video Description** - - **Full Transcript** - - Your goal is to produce a **comprehensive, technical, and structured summary** highlighting key engineering insights, tools, frameworks, system designs, workflows, and implementation processes from the video. - - --- - - ## ๐ŸŽฏ **Objectives** - 1. **Engineerโ€™s Perspective** โ€” Capture technical details over general narration. - 2. **Implementation Relevance** โ€” Show *how* the videoโ€™s concepts can be applied in real-world engineering. - 3. **Precision** โ€” Summarize strictly from provided inputs; no speculation. - 4. **Clarity** โ€” Maintain concise, professional language. - 5. **Compactness** โ€” Output in an optimized JSON schema for easy parsing by other systems. 
- - --- - - ## ๐Ÿ“„ **Final Output JSON Schema** - - Your response must be a valid JSON object with this structure: - - { - "high_level_overview": "String โ€” One paragraph capturing the essence of the video from an engineering viewpoint.", - "technical_breakdown": [ - { - "type": "tool", - "name": "String โ€” Tool, framework, package, or API name", - "purpose": "String โ€” Purpose or role in workflow" - }, - { - "type": "architecture", - "description": "String โ€” Detailed architecture or system design notes" - }, - { - "type": "process", - "step_number": "Integer โ€” Step order", - "description": "String โ€” Process step description" - } - ], - "insights": [ - "String โ€” Key engineering insight, trade-off, or optimization" - ], - "applications": [ - "String โ€” Practical application scenario" - ], - "limitations": [ - "String โ€” Known limitation, caveat, or risk" - ] - } - - ## ๐Ÿ“ **Formatting Rules** - - - CRITICAL: Only produce the raw JSON object โ€” no markdown code blocks, no extra text, no ```json wrapper. - - Your response must start with { and end with } as valid JSON. - - technical_breakdown must combine tools, architectures, and processes, differentiated by type. - - Use "type": "process" for ordered steps; always include step_number. - - Use "type": "tool" for any library, API, or SDK; include its name and purpose. - - Use "type": "architecture" for structural/system design descriptions. - - Keep text in complete, professional sentences; no fragments. - - Arrays must contain at least one entry if relevant information is available; omit the array if no data exists. - - Do not merge multiple steps into a single process entry โ€” each step gets its own JSON object. 
\ No newline at end of file diff --git a/Girish_Basavaraj_Hiremath/session_2/requirements.txt b/Girish_Basavaraj_Hiremath/session_2/requirements.txt deleted file mode 100644 index 75f4345..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/requirements.txt +++ /dev/null @@ -1,27 +0,0 @@ -beautifulsoup4 -google-api-core -google-api-python-client -google-auth -google-auth-httplib2 -gradio -gradio_client -huggingface-hub -ipykernel -ipython -lancedb -llama-index -llama-index-vector-stores-lancedb -llama-index-embeddings-huggingface -llama-index-llms-huggingface-api -llama-index-embeddings-openai -llama-index-llms-openrouter -nltk -numpy -pandas -openai -openai-whisper -pydantic -python-dotenv -sentence-transformers -yt-dlp -spacy \ No newline at end of file diff --git a/Girish_Basavaraj_Hiremath/session_2/youtube_video/01_youtube_pipeline_tutorial.ipynb b/Girish_Basavaraj_Hiremath/session_2/youtube_video/01_youtube_pipeline_tutorial.ipynb deleted file mode 100644 index 68a03b7..0000000 --- a/Girish_Basavaraj_Hiremath/session_2/youtube_video/01_youtube_pipeline_tutorial.ipynb +++ /dev/null @@ -1,1136 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# YouTube Video Processing Pipeline Tutorial\n", - "\n", - "This tutorial demonstrates how to build a complete YouTube video processing pipeline that:\n", - "1. **Searches** for YouTube videos based on a query\n", - "2. **Fetches** transcripts from the videos using yt-dlp\n", - "3. 
**Summarizes** the transcripts using AI via OpenRouter\n", - "\n", - "## What You'll Learn\n", - "\n", - "- How to use the YouTube Data API to search for videos\n", - "- How to extract transcripts using yt-dlp\n", - "- How to create AI-powered summaries using OpenRouter\n", - "- How to build a complete automated pipeline\n", - "\n", - "## Prerequisites\n", - "\n", - "You'll need:\n", - "- YouTube Data API key (free from Google Cloud Console)\n", - "- OpenRouter API key (cheaper alternative to OpenAI)\n", - "\n", - "## Pipeline Architecture\n", - "\n", - "```\n", - "Search Query โ†’ YouTube API โ†’ Video URLs โ†’ Transcript Fetcher โ†’ AI Summarizer โ†’ Final Summaries\n", - "```\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. Environment Setup and Configuration\n", - "\n", - "Install required packages and configure API keys for the YouTube pipeline.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install -r \"../requirements.txt\"" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "๐Ÿ“ฆ Importing libraries...\n", - "โœ… All imports successful!\n" - ] - } - ], - "source": [ - "# Install required packages (run this first)\n", - "import subprocess\n", - "import sys\n", - "\n", - "print(\"\\n๐Ÿ“ฆ Importing libraries...\")\n", - "import os\n", - "import re\n", - "import json\n", - "import time\n", - "from pathlib import Path\n", - "from typing import List, Dict, Optional\n", - "\n", - "from googleapiclient.discovery import build\n", - "from yt_dlp import YoutubeDL\n", - "from openai import OpenAI\n", - "\n", - "print(\"โœ… All imports successful!\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Configuration loaded!\n", - "๐Ÿ“ 
Output folder: youtube_pipeline_output\n" - ] - } - ], - "source": [ - "# https://console.cloud.google.com/\n", - "YOUTUBE_API_KEY = os.environ.get('YOUTUBE_API_KEY') # Get from Google Cloud Console\n", - "\n", - "OPENROUTER_API_KEY = os.environ.get('OPENROUTER_API_KEY') # Get from OpenRouter\n", - "\n", - "# Configuration parameters\n", - "CONFIG = {\n", - " \"youtube\": {\n", - " \"max_results\": 3, # Number of videos to fetch\n", - " \"api_version\": \"v3\", # YouTube API version\n", - " \"order\": \"relevance\" # Search order\n", - " },\n", - " \"transcripts\": {\n", - " \"language\": \"en\", # Transcript language\n", - " \"format\": \"srt\" # Subtitle format\n", - " },\n", - " \"openrouter\": {\n", - " \"model\": \"openai/gpt-4o-mini\", # OpenRouter model (much cheaper than direct OpenAI)\n", - " \"base_url\": \"https://openrouter.ai/api/v1\",\n", - " \"timeout\": 120 # API timeout in seconds\n", - " },\n", - " \"output\": {\n", - " \"folder\": \"youtube_pipeline_output\"\n", - " }\n", - "}\n", - "\n", - "# Create output directories\n", - "output_folder = CONFIG[\"output\"][\"folder\"]\n", - "os.makedirs(output_folder, exist_ok=True)\n", - "os.makedirs(f\"{output_folder}/transcripts\", exist_ok=True)\n", - "os.makedirs(f\"{output_folder}/summaries\", exist_ok=True)\n", - "os.makedirs(f\"{output_folder}/metadata\", exist_ok=True)\n", - "\n", - "print(f\"โœ… Configuration loaded!\")\n", - "print(f\"๐Ÿ“ Output folder: {output_folder}\")\n", - "\n", - "# Verify API keys are set\n", - "if YOUTUBE_API_KEY == \"YOUR_YOUTUBE_API_KEY_HERE\":\n", - " print(\"โš ๏ธ Please set your YouTube API key in the YOUTUBE_API_KEY variable\")\n", - "if OPENROUTER_API_KEY == \"YOUR_OPENROUTER_API_KEY_HERE\":\n", - " print(\"โš ๏ธ Please set your OpenRouter API key in the OPENROUTER_API_KEY variable\")\n", - " print(\"๐Ÿ’ก Get your OpenRouter key at: https://openrouter.ai/keys\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. 
YouTube Video Search\n", - "\n", - "Search for YouTube videos using the YouTube Data API with duration parsing.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Testing search with query: 'CrewAI tutorial'\n", - "๐Ÿ” Searching for videos: 'CrewAI tutorial'\n", - "โœ… Found 2 videos\n", - "\n", - "๐Ÿ“‹ Search Results:\n", - "\n", - "1. **CrewAI Tutorial | Agentic AI Tutorial**\n", - " Channel: codebasics\n", - " Duration: 1:11:04\n", - " URL: https://www.youtube.com/watch?v=G42J2MSKyc8\n", - "\n", - "2. **CrewAI Tutorial: Complete Crash Course for Beginners**\n", - " Channel: aiwithbrandon\n", - " Duration: 1:05:43\n", - " URL: https://www.youtube.com/watch?v=sPzc6hMg7So\n" - ] - } - ], - "source": [ - "def parse_duration(iso_duration: str) -> str:\n", - " \"\"\"Parse ISO 8601 duration format (PT4M13S) to human-readable format (4:13).\n", - " \n", - " Args:\n", - " iso_duration (str): ISO 8601 duration string (e.g., \"PT4M13S\")\n", - " \n", - " Returns:\n", - " str: Human-readable duration (e.g., \"4:13\")\n", - " \"\"\"\n", - " if not iso_duration:\n", - " return \"Unknown\"\n", - " \n", - " # Parse ISO 8601 duration format\n", - " pattern = r\"PT(?:(\\d+)H)?(?:(\\d+)M)?(?:(\\d+)S)?\"\n", - " match = re.match(pattern, iso_duration)\n", - " \n", - " if not match:\n", - " return \"Unknown\"\n", - " \n", - " hours, minutes, seconds = match.groups()\n", - " hours = int(hours) if hours else 0\n", - " minutes = int(minutes) if minutes else 0\n", - " seconds = int(seconds) if seconds else 0\n", - " \n", - " # Format duration\n", - " if hours > 0:\n", - " return f\"{hours}:{minutes:02d}:{seconds:02d}\"\n", - " else:\n", - " return f\"{minutes}:{seconds:02d}\"\n", - "\n", - "def search_youtube_videos(search_query: str, max_results: int = 3) -> List[Dict]:\n", - " \"\"\"Search for YouTube videos using the YouTube Data API.\n", - " \n", - " Args:\n", - " 
search_query (str): The search query to find relevant YouTube videos\n", - " max_results (int): Maximum number of results to return\n", - " \n", - " Returns:\n", - " List[Dict]: List of video information dictionaries\n", - " \"\"\"\n", - " print(f\"๐Ÿ” Searching for videos: '{search_query}'\")\n", - " \n", - " try:\n", - " # Build the YouTube API client\n", - " youtube = build(\"youtube\", CONFIG[\"youtube\"][\"api_version\"], developerKey=YOUTUBE_API_KEY)\n", - " \n", - " # Search for videos\n", - " search_request = youtube.search().list(\n", - " q=search_query,\n", - " part=\"id,snippet\",\n", - " maxResults=max_results,\n", - " type=\"video\",\n", - " order=CONFIG[\"youtube\"][\"order\"]\n", - " )\n", - " \n", - " search_response = search_request.execute()\n", - " \n", - " # Extract video IDs and basic info\n", - " video_ids = []\n", - " videos_data = []\n", - " \n", - " for search_result in search_response.get(\"items\", []):\n", - " if \"id\" in search_result and \"videoId\" in search_result[\"id\"]:\n", - " video_id = search_result[\"id\"][\"videoId\"]\n", - " video_ids.append(video_id)\n", - " videos_data.append(search_result)\n", - " \n", - " # Get detailed video information including duration\n", - " videos = []\n", - " if video_ids:\n", - " video_details_request = youtube.videos().list(\n", - " part=\"contentDetails,statistics\", \n", - " id=\",\".join(video_ids)\n", - " )\n", - " video_details_response = video_details_request.execute()\n", - " \n", - " # Create a mapping of video_id to details\n", - " video_details_map = {}\n", - " for video_detail in video_details_response.get(\"items\", []):\n", - " video_details_map[video_detail[\"id\"]] = video_detail\n", - " \n", - " # Build final video information\n", - " for i, search_result in enumerate(videos_data):\n", - " video_id = video_ids[i]\n", - " \n", - " # Get duration from video details\n", - " duration = \"Unknown\"\n", - " if video_id in video_details_map:\n", - " duration_iso = 
video_details_map[video_id][\"contentDetails\"][\"duration\"]\n", - " duration = parse_duration(duration_iso)\n", - " \n", - " description = search_result[\"snippet\"][\"description\"]\n", - " if len(description) > 200:\n", - " description = description[:200] + \"...\"\n", - " \n", - " video_info = {\n", - " \"title\": search_result[\"snippet\"][\"title\"],\n", - " \"channel\": search_result[\"snippet\"][\"channelTitle\"],\n", - " \"url\": f\"https://www.youtube.com/watch?v={video_id}\",\n", - " \"description\": description,\n", - " \"published_at\": search_result[\"snippet\"][\"publishedAt\"],\n", - " \"video_id\": video_id,\n", - " \"duration\": duration\n", - " }\n", - " videos.append(video_info)\n", - " \n", - " print(f\"โœ… Found {len(videos)} videos\")\n", - " return videos\n", - " \n", - " except Exception as e:\n", - " print(f\"โŒ Error searching YouTube videos: {str(e)}\")\n", - " return []\n", - "\n", - "# Test the search function\n", - "test_query = \"CrewAI tutorial\"\n", - "print(f\"Testing search with query: '{test_query}'\")\n", - "\n", - "if YOUTUBE_API_KEY != \"YOUR_YOUTUBE_API_KEY_HERE\":\n", - " videos = search_youtube_videos(test_query, max_results=2)\n", - " \n", - " # Display results\n", - " print(\"\\n๐Ÿ“‹ Search Results:\")\n", - " for i, video in enumerate(videos, 1):\n", - " print(f\"\\n{i}. **{video['title']}**\")\n", - " print(f\" Channel: {video['channel']}\")\n", - " print(f\" Duration: {video['duration']}\")\n", - " print(f\" URL: {video['url']}\")\n", - "else:\n", - " print(\"โš ๏ธ Please set your YouTube API key to test the search function\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. 
Transcript Fetching\n", - "\n", - "Fetch video transcripts using yt-dlp with proper error handling.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setting up transcript fetcher...\n", - "โœ… Transcript fetcher ready!\n", - "Note: Transcript fetching will be demonstrated in the full pipeline section.\n" - ] - } - ], - "source": [ - "class YouTubeTranscriptFetcher:\n", - " \"\"\"A class to fetch transcripts from YouTube videos using yt-dlp.\"\"\"\n", - " \n", - " def __init__(self, output_folder: str = \"transcripts\", language: str = \"en\"):\n", - " \"\"\"Initialize the transcript fetcher.\n", - " \n", - " Args:\n", - " output_folder (str): Directory where transcripts will be saved\n", - " language (str): Language code for subtitles\n", - " \"\"\"\n", - " self.output_folder = output_folder\n", - " self.language = language\n", - " os.makedirs(output_folder, exist_ok=True)\n", - " \n", - " def _get_ydl_options(self) -> dict:\n", - " \"\"\"Get the yt-dlp options configuration.\n", - " \n", - " Returns:\n", - " dict: Configuration options for yt-dlp\n", - " \"\"\"\n", - " return {\n", - " \"skip_download\": True, # Don't download video\n", - " \"writesubtitles\": True, # Download human captions\n", - " \"writeautomaticsub\": True, # Download auto captions\n", - " \"subtitleslangs\": [self.language], # Language preference\n", - " \"subtitlesformat\": \"srt\", # Format preference\n", - " \"outtmpl\": os.path.join(self.output_folder, \"%(id)s.%(ext)s\"),\n", - " \"ignoreerrors\": False, # Don't ignore errors\n", - " }\n", - " \n", - " def fetch_transcript(self, url: str) -> bool:\n", - " \"\"\"Fetch transcript for a single YouTube video.\n", - " \n", - " Args:\n", - " url (str): YouTube video URL\n", - " \n", - " Returns:\n", - " bool: True if transcript was successfully downloaded, False otherwise\n", - " \"\"\"\n", - " try:\n", - " print(f\"๐Ÿ“ฅ Fetching 
transcript for: {url}\")\n", - " with YoutubeDL(self._get_ydl_options()) as ydl:\n", - " ydl.download([url])\n", - " print(f\"โœ… Transcript downloaded successfully\")\n", - " return True\n", - " except Exception as e:\n", - " print(f\"โŒ Error downloading transcript: {str(e)}\")\n", - " return False\n", - " \n", - " def fetch_transcripts(self, urls: List[str]) -> Dict[str, bool]:\n", - " \"\"\"Fetch transcripts for multiple YouTube videos.\n", - " \n", - " Args:\n", - " urls (List[str]): List of YouTube video URLs\n", - " \n", - " Returns:\n", - " Dict[str, bool]: Dictionary with URLs as keys and success status as values\n", - " \"\"\"\n", - " if not urls:\n", - " return {}\n", - " \n", - " print(f\"\\n๐Ÿ“ฅ Fetching transcripts for {len(urls)} videos...\")\n", - " results = {}\n", - " \n", - " for url in urls:\n", - " results[url] = self.fetch_transcript(url)\n", - " \n", - " successful = sum(results.values())\n", - " print(f\"\\nโœ… Successfully fetched {successful}/{len(urls)} transcripts\")\n", - " return results\n", - " \n", - " def get_transcript_files(self, video_ids: List[str]) -> List[str]:\n", - " \"\"\"Get list of existing transcript files for given video IDs.\n", - " \n", - " Args:\n", - " video_ids (List[str]): List of video IDs\n", - " \n", - " Returns:\n", - " List[str]: List of existing transcript file paths\n", - " \"\"\"\n", - " transcript_files = []\n", - " \n", - " for video_id in video_ids:\n", - " transcript_path = os.path.join(self.output_folder, f\"{video_id}.{self.language}.srt\")\n", - " if os.path.exists(transcript_path):\n", - " transcript_files.append(transcript_path)\n", - " print(f\"๐Ÿ“„ Found transcript: {transcript_path}\")\n", - " else:\n", - " print(f\"โ“ Missing transcript: {transcript_path}\")\n", - " \n", - " return transcript_files\n", - "\n", - "# Test the transcript fetcher (only if we have search results)\n", - "print(\"Setting up transcript fetcher...\")\n", - "transcript_fetcher = YouTubeTranscriptFetcher(\n", - " 
output_folder=f\"{CONFIG['output']['folder']}/transcripts\",\n", - " language=CONFIG[\"transcripts\"][\"language\"]\n", - ")\n", - "\n", - "print(\"โœ… Transcript fetcher ready!\")\n", - "print(\"Note: Transcript fetching will be demonstrated in the full pipeline section.\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. AI-Powered Transcript Summarization\n", - "\n", - "Create structured summaries using OpenRouter with a professional summarization prompt.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setting up transcript summarizer...\n", - "โœ… Transcript summarizer ready!\n" - ] - } - ], - "source": [ - "# Define the prompt for video summarization\n", - "SUMMARIZER_PROMPT = \"\"\"\n", - "You are **TechSummarizerAI**, an expert AI assistant specializing in analyzing technical videos for software engineers, machine learning engineers, and other technical audiences.\n", - "\n", - "Your goal is to produce a **comprehensive, technical, and structured summary** highlighting key engineering insights, tools, frameworks, system designs, workflows, and implementation processes from the video.\n", - "\n", - "## Objectives\n", - "1. **Engineer's Perspective** โ€” Capture technical details over general narration.\n", - "2. **Implementation Relevance** โ€” Show *how* the video's concepts can be applied in real-world engineering.\n", - "3. **Precision** โ€” Summarize strictly from provided inputs; no speculation.\n", - "4. 
**Clarity** โ€” Maintain concise, professional language.\n", - "\n", - "## Output JSON Schema\n", - "\n", - "Your response must be a valid JSON object with this structure:\n", - "\n", - "{\n", - " \"high_level_overview\": \"String โ€” One paragraph capturing the essence of the video from an engineering viewpoint.\",\n", - " \"technical_breakdown\": [\n", - " {\n", - " \"type\": \"tool\", \n", - " \"name\": \"String โ€” Tool, framework, package, or API name\",\n", - " \"purpose\": \"String โ€” Purpose or role in workflow\"\n", - " },\n", - " {\n", - " \"type\": \"architecture\",\n", - " \"description\": \"String โ€” Detailed architecture or system design notes\"\n", - " },\n", - " {\n", - " \"type\": \"process\",\n", - " \"step_number\": \"Integer โ€” Step order\",\n", - " \"description\": \"String โ€” Process step description\"\n", - " }\n", - " ],\n", - " \"insights\": [\n", - " \"String โ€” Key engineering insight, trade-off, or optimization\"\n", - " ],\n", - " \"applications\": [\n", - " \"String โ€” Practical application scenario\"\n", - " ],\n", - " \"limitations\": [\n", - " \"String โ€” Known limitation, caveat, or risk\"\n", - " ]\n", - "}\n", - "\n", - "## Formatting Rules\n", - "\n", - "- CRITICAL: Only produce the raw JSON object โ€” no markdown code blocks, no extra text, no ```json wrapper.\n", - "- Your response must start with { and end with } as valid JSON.\n", - "- Keep text in complete, professional sentences; no fragments.\n", - "- Arrays must contain at least one entry if relevant information is available; omit empty arrays.\n", - "\"\"\"\n", - "\n", - "class YouTubeTranscriptSummarizer:\n", - " \"\"\"A class to summarize YouTube SRT transcript files using OpenRouter.\"\"\"\n", - " \n", - " def __init__(self, api_key: Optional[str] = None):\n", - " \"\"\"Initialize the transcript summarizer.\n", - " \n", - " Args:\n", - " api_key (Optional[str]): OpenRouter API key. 
If None, uses OPENROUTER_API_KEY\n", - " \"\"\"\n", - " self.client = OpenAI(\n", - " api_key=api_key or OPENROUTER_API_KEY,\n", - " base_url=CONFIG[\"openrouter\"][\"base_url\"]\n", - " )\n", - " self.model = CONFIG[\"openrouter\"][\"model\"]\n", - " self.timeout = CONFIG[\"openrouter\"][\"timeout\"]\n", - " \n", - " def _read_srt_file(self, srt_path: str) -> str:\n", - " \"\"\"Read and parse SRT file content.\n", - " \n", - " Args:\n", - " srt_path (str): Path to the SRT file\n", - " \n", - " Returns:\n", - " str: The content of the SRT file as plain text\n", - " \"\"\"\n", - " print(f\"๐Ÿ“– Reading SRT file: {os.path.basename(srt_path)}\")\n", - " \n", - " with open(srt_path, \"r\", encoding=\"utf-8\") as file:\n", - " content = file.read()\n", - " \n", - " # Basic SRT parsing - extract just the text content\n", - " lines = content.split(\"\\n\")\n", - " text_lines = []\n", - " \n", - " for line in lines:\n", - " line = line.strip()\n", - " # Skip sequence numbers, timestamps, and empty lines\n", - " if line and not line.isdigit() and \"-->\" not in line:\n", - " text_lines.append(line)\n", - " \n", - " transcript_text = \" \".join(text_lines)\n", - " print(f\"๐Ÿ“Š Extracted {len(text_lines)} text lines, {len(transcript_text)} characters total\")\n", - " return transcript_text\n", - " \n", - " def summarize_transcript(\n", - " self, \n", - " srt_path: str, \n", - " video_title: str = \"\", \n", - " video_description: str = \"\", \n", - " output_path: Optional[str] = None\n", - " ) -> str:\n", - " \"\"\"Summarize a YouTube SRT transcript file.\n", - " \n", - " Args:\n", - " srt_path (str): Path to the SRT file to summarize\n", - " video_title (str): Title of the video (optional)\n", - " video_description (str): Description of the video (optional)\n", - " output_path (Optional[str]): Path to save the summary. 
If None, returns summary\n", - " \n", - " Returns:\n", - " str: The generated summary\n", - " \"\"\"\n", - " print(f\"\\n๐Ÿค– Starting summarization for: {os.path.basename(srt_path)}\")\n", - " \n", - " # Read the SRT file\n", - " transcript_text = self._read_srt_file(srt_path)\n", - " \n", - " # Prepare the user message with video details and transcript\n", - " user_message = f\"\"\"\n", - "**YouTube Video Title:** {video_title if video_title else \"Not provided\"}\n", - "\n", - "**YouTube Video Description:** {video_description if video_description else \"Not provided\"}\n", - "\n", - "**Full Transcript:**\n", - "{transcript_text}\n", - "\"\"\"\n", - " \n", - " try:\n", - " print(f\"๐Ÿ”„ Making API call to OpenRouter...\")\n", - " \n", - " # Make API call to OpenRouter\n", - " response = self.client.chat.completions.create(\n", - " model=self.model,\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": SUMMARIZER_PROMPT},\n", - " {\"role\": \"user\", \"content\": user_message}\n", - " ],\n", - " timeout=self.timeout\n", - " )\n", - " \n", - " summary = response.choices[0].message.content\n", - " \n", - " # Save or return the summary\n", - " if output_path:\n", - " print(f\"๐Ÿ’พ Saving summary to: {os.path.basename(output_path)}\")\n", - " with open(output_path, \"w\", encoding=\"utf-8\") as file:\n", - " file.write(summary)\n", - " \n", - " print(f\"โœ… Summarization completed\")\n", - " return summary\n", - " \n", - " except Exception as e:\n", - " print(f\"โŒ Error during summarization: {str(e)}\")\n", - " return \"\"\n", - " \n", - " def summarize_transcripts(\n", - " self, \n", - " transcript_paths: List[str], \n", - " videos: List[Dict], \n", - " output_folder: str\n", - " ) -> Dict[str, bool]:\n", - " \"\"\"Summarize multiple transcript files.\n", - " \n", - " Args:\n", - " transcript_paths (List[str]): List of SRT file paths\n", - " videos (List[Dict]): List of video information for context\n", - " output_folder (str): Folder to save 
summaries\n", - " \n", - " Returns:\n", - " Dict[str, bool]: Dictionary with file paths as keys and success status as values\n", - " \"\"\"\n", - " if not transcript_paths:\n", - " return {}\n", - " \n", - " print(f\"\\n๐Ÿค– Starting batch summarization for {len(transcript_paths)} transcripts\")\n", - " \n", - " # Create a mapping of video IDs to video info\n", - " video_info_map = {video[\"video_id\"]: video for video in videos}\n", - " \n", - " results = {}\n", - " \n", - " for transcript_path in transcript_paths:\n", - " # Extract video ID from filename\n", - " filename = os.path.basename(transcript_path)\n", - " video_id = filename.split(\".\")[0]\n", - " video_info = video_info_map.get(video_id, {})\n", - " \n", - " # Create output path\n", - " summary_filename = f\"{video_id}_summary.json\"\n", - " summary_path = os.path.join(output_folder, summary_filename)\n", - " \n", - " # Summarize transcript\n", - " summary = self.summarize_transcript(\n", - " transcript_path,\n", - " video_info.get(\"title\", \"\"),\n", - " video_info.get(\"description\", \"\"),\n", - " summary_path\n", - " )\n", - " \n", - " results[transcript_path] = bool(summary)\n", - " \n", - " successful = sum(results.values())\n", - " print(f\"\\nโœ… Successfully summarized {successful}/{len(transcript_paths)} transcripts\")\n", - " return results\n", - "\n", - "# Initialize the summarizer\n", - "print(\"Setting up transcript summarizer...\")\n", - "\n", - "if OPENROUTER_API_KEY != \"YOUR_OPENROUTER_API_KEY_HERE\":\n", - " summarizer = YouTubeTranscriptSummarizer()\n", - " print(\"โœ… Transcript summarizer ready!\")\n", - "else:\n", - " print(\"โš ๏ธ Please set your OpenRouter API key to use the summarizer\")\n", - " summarizer = None\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. 
Complete Pipeline Integration\n", - "\n", - "Integrate all components into a single automated pipeline with comprehensive error handling.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "โœ… Pipeline class defined and ready to use!\n" - ] - } - ], - "source": [ - "class YouTubePipeline:\n", - " \"\"\"Complete pipeline for YouTube video processing.\n", - " \n", - " This class integrates video search, transcript fetching, and summarization\n", - " into a single automated workflow.\n", - " \"\"\"\n", - " \n", - " def __init__(self, output_folder: str = \"youtube_pipeline_output\"):\n", - " \"\"\"Initialize the YouTube pipeline.\n", - " \n", - " Args:\n", - " output_folder (str): Base output folder for all results\n", - " \"\"\"\n", - " self.output_folder = output_folder\n", - " \n", - " # Create output directories\n", - " self.transcripts_folder = os.path.join(output_folder, \"transcripts\")\n", - " self.summaries_folder = os.path.join(output_folder, \"summaries\")\n", - " self.metadata_folder = os.path.join(output_folder, \"metadata\")\n", - " \n", - " for folder in [self.transcripts_folder, self.summaries_folder, self.metadata_folder]:\n", - " os.makedirs(folder, exist_ok=True)\n", - " \n", - " # Initialize components\n", - " self.transcript_fetcher = YouTubeTranscriptFetcher(\n", - " output_folder=self.transcripts_folder,\n", - " language=CONFIG[\"transcripts\"][\"language\"]\n", - " )\n", - " \n", - " if OPENROUTER_API_KEY != \"YOUR_OPENROUTER_API_KEY_HERE\":\n", - " self.summarizer = YouTubeTranscriptSummarizer()\n", - " else:\n", - " self.summarizer = None\n", - " \n", - " print(f\"๐Ÿš€ Pipeline initialized\")\n", - " print(f\"๐Ÿ“ Output folder: {self.output_folder}\")\n", - " \n", - " def run_pipeline(self, search_query: str, max_videos: int = 3) -> Dict:\n", - " \"\"\"Run the complete pipeline with the given search query.\n", - " \n", - " Args:\n", 
- " search_query (str): The search query for YouTube videos\n", - " max_videos (int): Maximum number of videos to process\n", - " \n", - " Returns:\n", - " Dict: Complete pipeline results including all steps\n", - " \"\"\"\n", - " pipeline_start_time = time.time()\n", - " \n", - " print(\"=\" * 80)\n", - " print(f\"๐ŸŽฌ YOUTUBE PROCESSING PIPELINE\")\n", - " print(f\"๐Ÿ“ Search Query: '{search_query}'\")\n", - " print(f\"๐ŸŽฏ Max Videos: {max_videos}\")\n", - " print(f\"โฐ Start Time: {time.strftime('%Y-%m-%d %H:%M:%S')}\")\n", - " print(\"=\" * 80)\n", - " \n", - " # Step 1: Search for videos\n", - " print(\"\\n\" + \"=\" * 50)\n", - " print(\"๐Ÿ“บ STEP 1: VIDEO SEARCH\")\n", - " print(\"=\" * 50)\n", - " \n", - " if YOUTUBE_API_KEY == \"YOUR_YOUTUBE_API_KEY_HERE\":\n", - " print(\"โŒ YouTube API key not set. Please set YOUTUBE_API_KEY.\")\n", - " return {\"success\": False, \"error\": \"YouTube API key not set\"}\n", - " \n", - " videos = search_youtube_videos(search_query, max_videos)\n", - " \n", - " if not videos:\n", - " print(\"โŒ No videos found. 
Pipeline terminated.\")\n", - " return {\n", - " \"success\": False,\n", - " \"error\": \"No videos found for the search query\",\n", - " \"search_query\": search_query,\n", - " \"videos_found\": 0\n", - " }\n", - " \n", - " # Save search metadata\n", - " search_metadata = {\n", - " \"search_query\": search_query,\n", - " \"timestamp\": time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n", - " \"total_videos_found\": len(videos),\n", - " \"videos\": videos\n", - " }\n", - " \n", - " search_metadata_path = os.path.join(\n", - " self.metadata_folder, f\"search_results_{int(time.time())}.json\"\n", - " )\n", - " with open(search_metadata_path, \"w\", encoding=\"utf-8\") as f:\n", - " json.dump(search_metadata, f, indent=2, ensure_ascii=False)\n", - " \n", - " # Step 2: Fetch transcripts\n", - " print(\"\\n\" + \"=\" * 50)\n", - " print(\"๐Ÿ“ฅ STEP 2: TRANSCRIPT FETCHING\")\n", - " print(\"=\" * 50)\n", - " \n", - " # Extract URLs from video data\n", - " urls = [video[\"url\"] for video in videos]\n", - " \n", - " # Fetch transcripts\n", - " fetch_results = self.transcript_fetcher.fetch_transcripts(urls)\n", - " \n", - " # Determine successful transcript files\n", - " video_ids = [video[\"video_id\"] for video in videos]\n", - " transcript_paths = self.transcript_fetcher.get_transcript_files(video_ids)\n", - " \n", - " if not transcript_paths:\n", - " print(\"โŒ No transcripts could be fetched. Pipeline terminated.\")\n", - " return {\n", - " \"success\": False,\n", - " \"error\": \"No transcripts could be fetched\",\n", - " \"search_query\": search_query,\n", - " \"videos_found\": len(videos),\n", - " \"transcripts_fetched\": 0\n", - " }\n", - " \n", - " # Step 3: Summarize transcripts\n", - " print(\"\\n\" + \"=\" * 50)\n", - " print(\"๐Ÿค– STEP 3: TRANSCRIPT SUMMARIZATION\")\n", - " print(\"=\" * 50)\n", - " \n", - " if not self.summarizer:\n", - " print(\"โš ๏ธ OpenRouter API key not set. 
Skipping summarization.\")\n", - " summarization_results = {}\n", - " else:\n", - " summarization_results = self.summarizer.summarize_transcripts(\n", - " transcript_paths, videos, self.summaries_folder\n", - " )\n", - " \n", - " # Calculate final results\n", - " pipeline_end_time = time.time()\n", - " pipeline_duration = pipeline_end_time - pipeline_start_time\n", - " successful_summaries = sum(summarization_results.values()) if summarization_results else 0\n", - " \n", - " # Create final results\n", - " final_results = {\n", - " \"success\": True,\n", - " \"search_query\": search_query,\n", - " \"pipeline_duration_seconds\": round(pipeline_duration, 2),\n", - " \"timestamp\": time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n", - " \"videos_found\": len(videos),\n", - " \"transcripts_fetched\": len(transcript_paths),\n", - " \"summaries_created\": successful_summaries,\n", - " \"output_folder\": self.output_folder,\n", - " \"videos\": videos,\n", - " \"transcript_paths\": transcript_paths,\n", - " \"summarization_results\": summarization_results\n", - " }\n", - " \n", - " # Save final results\n", - " results_path = os.path.join(\n", - " self.metadata_folder, f\"pipeline_results_{int(time.time())}.json\"\n", - " )\n", - " with open(results_path, \"w\", encoding=\"utf-8\") as f:\n", - " json.dump(final_results, f, indent=2, ensure_ascii=False)\n", - " \n", - " # Print final summary\n", - " print(\"\\n\" + \"=\" * 80)\n", - " print(\"๐ŸŽ‰ PIPELINE COMPLETED\")\n", - " print(\"=\" * 80)\n", - " print(f\"๐Ÿ“ Search Query: '{search_query}'\")\n", - " print(f\"๐Ÿ“บ Videos Found: {len(videos)}\")\n", - " print(f\"๐Ÿ“„ Transcripts Fetched: {len(transcript_paths)}\")\n", - " print(f\"๐Ÿค– Summaries Created: {successful_summaries}\")\n", - " print(f\"โฑ๏ธ Total Duration: {pipeline_duration:.2f} seconds\")\n", - " print(f\"๐Ÿ“ Output Folder: {self.output_folder}\")\n", - " print(f\"๐Ÿ’พ Results Saved: {results_path}\")\n", - " print(\"=\" * 80)\n", - " \n", - " return 
final_results\n", - "\n", - "print(\"โœ… Pipeline class defined and ready to use!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 6. Run the Complete Pipeline\n", - "\n", - "Execute the complete pipeline with a sample query and display results.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "๐Ÿš€ Pipeline initialized\n", - "๐Ÿ“ Output folder: youtube_pipeline_output\n", - "๐Ÿš€ Ready to run pipeline with query: 'Python FastAPI tutorial'\n", - "๐ŸŽฏ Max videos: 2\n", - "\n", - "๐ŸŽฌ Starting pipeline...\n", - "================================================================================\n", - "๐ŸŽฌ YOUTUBE PROCESSING PIPELINE\n", - "๐Ÿ“ Search Query: 'Python FastAPI tutorial'\n", - "๐ŸŽฏ Max Videos: 2\n", - "โฐ Start Time: 2025-09-20 13:04:44\n", - "================================================================================\n", - "\n", - "==================================================\n", - "๐Ÿ“บ STEP 1: VIDEO SEARCH\n", - "==================================================\n", - "๐Ÿ” Searching for videos: 'Python FastAPI tutorial'\n", - "โœ… Found 2 videos\n", - "\n", - "==================================================\n", - "๐Ÿ“ฅ STEP 2: TRANSCRIPT FETCHING\n", - "==================================================\n", - "\n", - "๐Ÿ“ฅ Fetching transcripts for 2 videos...\n", - "๐Ÿ“ฅ Fetching transcript for: https://www.youtube.com/watch?v=iWS9ogMPOI0\n", - "[youtube] Extracting URL: https://www.youtube.com/watch?v=iWS9ogMPOI0\n", - "[youtube] iWS9ogMPOI0: Downloading webpage\n", - "[youtube] iWS9ogMPOI0: Downloading tv simply player API JSON\n", - "[youtube] iWS9ogMPOI0: Downloading tv client config\n", - "[youtube] iWS9ogMPOI0: Downloading tv player API JSON\n", - "[info] iWS9ogMPOI0: Downloading subtitles: en\n", - "[info] iWS9ogMPOI0: Downloading 1 format(s): 399+251\n", - "Deleting existing file 
youtube_pipeline_output/transcripts/iWS9ogMPOI0.en.srt\n", - "[info] Writing video subtitles to: youtube_pipeline_output/transcripts/iWS9ogMPOI0.en.srt\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: The extractor specified to use impersonation for this download, but no impersonate target is available. If you encounter errors, then see https://github.com/yt-dlp/yt-dlp#impersonation for information on installing the required dependencies\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[download] Destination: youtube_pipeline_output/transcripts/iWS9ogMPOI0.en.srt\n", - "[download] 100% of 25.18KiB in 00:00:00 at 335.08KiB/s\n", - "โœ… Transcript downloaded successfully\n", - "๐Ÿ“ฅ Fetching transcript for: https://www.youtube.com/watch?v=Wr1JjhTt1Xg\n", - "[youtube] Extracting URL: https://www.youtube.com/watch?v=Wr1JjhTt1Xg\n", - "[youtube] Wr1JjhTt1Xg: Downloading webpage\n", - "[youtube] Wr1JjhTt1Xg: Downloading tv simply player API JSON\n", - "[youtube] Wr1JjhTt1Xg: Downloading tv client config\n", - "[youtube] Wr1JjhTt1Xg: Downloading tv player API JSON\n", - "[info] Wr1JjhTt1Xg: Downloading subtitles: en\n", - "[info] Wr1JjhTt1Xg: Downloading 1 format(s): 313+251\n", - "Deleting existing file youtube_pipeline_output/transcripts/Wr1JjhTt1Xg.en.srt\n", - "[info] Writing video subtitles to: youtube_pipeline_output/transcripts/Wr1JjhTt1Xg.en.srt\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: The extractor specified to use impersonation for this download, but no impersonate target is available. 
If you encounter errors, then see https://github.com/yt-dlp/yt-dlp#impersonation for information on installing the required dependencies\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[download] Destination: youtube_pipeline_output/transcripts/Wr1JjhTt1Xg.en.srt\n", - "[download] 100% of 26.55KiB in 00:00:00 at 370.35KiB/s\n", - "โœ… Transcript downloaded successfully\n", - "\n", - "โœ… Successfully fetched 2/2 transcripts\n", - "๐Ÿ“„ Found transcript: youtube_pipeline_output/transcripts/iWS9ogMPOI0.en.srt\n", - "๐Ÿ“„ Found transcript: youtube_pipeline_output/transcripts/Wr1JjhTt1Xg.en.srt\n", - "\n", - "==================================================\n", - "๐Ÿค– STEP 3: TRANSCRIPT SUMMARIZATION\n", - "==================================================\n", - "\n", - "๐Ÿค– Starting batch summarization for 2 transcripts\n", - "\n", - "๐Ÿค– Starting summarization for: iWS9ogMPOI0.en.srt\n", - "๐Ÿ“– Reading SRT file: iWS9ogMPOI0.en.srt\n", - "๐Ÿ“Š Extracted 301 text lines, 15355 characters total\n", - "๐Ÿ”„ Making API call to OpenRouter...\n", - "๐Ÿ’พ Saving summary to: iWS9ogMPOI0_summary.json\n", - "โœ… Summarization completed\n", - "\n", - "๐Ÿค– Starting summarization for: Wr1JjhTt1Xg.en.srt\n", - "๐Ÿ“– Reading SRT file: Wr1JjhTt1Xg.en.srt\n", - "๐Ÿ“Š Extracted 411 text lines, 12909 characters total\n", - "๐Ÿ”„ Making API call to OpenRouter...\n", - "๐Ÿ’พ Saving summary to: Wr1JjhTt1Xg_summary.json\n", - "โœ… Summarization completed\n", - "\n", - "โœ… Successfully summarized 2/2 transcripts\n", - "\n", - "================================================================================\n", - "๐ŸŽ‰ PIPELINE COMPLETED\n", - "================================================================================\n", - "๐Ÿ“ Search Query: 'Python FastAPI tutorial'\n", - "๐Ÿ“บ Videos Found: 2\n", - "๐Ÿ“„ Transcripts Fetched: 2\n", - "๐Ÿค– Summaries Created: 2\n", - "โฑ๏ธ Total Duration: 34.54 seconds\n", - "๐Ÿ“ Output Folder: 
youtube_pipeline_output\n", - "๐Ÿ’พ Results Saved: youtube_pipeline_output/metadata/pipeline_results_1758353718.json\n", - "================================================================================\n", - "\n", - "๐Ÿ“Š FINAL SUMMARY:\n", - "โœ… Successfully processed 2 videos\n", - "๐Ÿ“„ Fetched 2 transcripts\n", - "๐Ÿค– Generated 2 summaries\n", - "โฑ๏ธ Completed in 34.54 seconds\n", - "\n", - "๐Ÿ“ Check your results in: youtube_pipeline_output\n" - ] - } - ], - "source": [ - "# Initialize the pipeline\n", - "pipeline = YouTubePipeline(output_folder=CONFIG[\"output\"][\"folder\"])\n", - "\n", - "# Define our search query\n", - "search_query = \"Python FastAPI tutorial\"\n", - "max_videos = 2 \n", - "\n", - "print(f\"๐Ÿš€ Ready to run pipeline with query: '{search_query}'\")\n", - "print(f\"๐ŸŽฏ Max videos: {max_videos}\")\n", - "\n", - "# Check if API keys are set\n", - "if YOUTUBE_API_KEY == \"YOUR_YOUTUBE_API_KEY_HERE\":\n", - " print(\"\\nโš ๏ธ To run the pipeline, please set your API keys:\")\n", - " print(\"1. YOUTUBE_API_KEY = 'your_youtube_api_key'\")\n", - " print(\"2. 
OPENROUTER_API_KEY = 'your_openrouter_api_key'\")\n", - " print(\"\\nThen re-run this cell to execute the pipeline.\")\n", - "else:\n", - " print(\"\\n๐ŸŽฌ Starting pipeline...\")\n", - " # Run the complete pipeline\n", - " results = pipeline.run_pipeline(search_query, max_videos)\n", - " \n", - " # Display summary of results\n", - " if results[\"success\"]:\n", - " print(f\"\\n๐Ÿ“Š FINAL SUMMARY:\")\n", - " print(f\"โœ… Successfully processed {results['videos_found']} videos\")\n", - " print(f\"๐Ÿ“„ Fetched {results['transcripts_fetched']} transcripts\")\n", - " print(f\"๐Ÿค– Generated {results['summaries_created']} summaries\")\n", - " print(f\"โฑ๏ธ Completed in {results['pipeline_duration_seconds']} seconds\")\n", - " print(f\"\\n๐Ÿ“ Check your results in: {results['output_folder']}\")\n", - " else:\n", - " print(f\"\\nโŒ Pipeline failed: {results.get('error', 'Unknown error')}\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## ๐ŸŽ‰ Tutorial Complete!\n", - "\n", - "You've successfully built a complete YouTube video processing pipeline! \n", - "\n", - "### ๐Ÿ› ๏ธ What You Built\n", - "\n", - "1. **๐Ÿ” Video Search** - YouTube Data API integration with duration parsing\n", - "2. **๐Ÿ“ฅ Transcript Extraction** - yt-dlp integration with error handling\n", - "3. **๐Ÿค– AI Summarization** - OpenRouter integration with structured prompts\n", - "4. 
**๐Ÿ”— Complete Pipeline** - Automated end-to-end processing\n", - "\n", - "### ๐Ÿ“Š Pipeline Architecture\n", - "\n", - "```\n", - "Search Query โ†’ YouTube API โ†’ Video URLs โ†’ Transcript Fetcher โ†’ AI Summarizer โ†’ Results\n", - "```\n", - "\n", - "### ๐Ÿš€ Next Steps\n", - "\n", - "**Immediate Improvements:**\n", - "- Modify the search query to explore different topics\n", - "- Adjust `max_videos` parameter for batch processing\n", - "- Customize the AI prompt for specific use cases\n", - "\n", - "**Advanced Features:**\n", - "- Add video filtering (duration, views, upload date)\n", - "- Implement parallel processing for multiple videos\n", - "- Create a web interface for easier usage\n", - "- Add database storage for results\n", - "\n", - "### ๐Ÿ”‘ API Setup\n", - "\n", - "**YouTube Data API (Free):**\n", - "- Visit [Google Cloud Console](https://console.cloud.google.com/)\n", - "- Enable YouTube Data API v3\n", - "- Create and copy API key\n", - "\n", - "**OpenRouter (Affordable AI):**\n", - "- Visit [OpenRouter](https://openrouter.ai/keys)\n", - "- Sign up and generate API key\n", - "- Add credits (starts from $5)\n", - "\n", - "### ๐Ÿ“ Output Files\n", - "\n", - "Your pipeline generates:\n", - "- `transcripts/*.srt` - Original video transcripts\n", - "- `summaries/*.json` - Structured AI summaries\n", - "- `metadata/*.json` - Search and pipeline metadata\n", - "\n", - "Happy coding! 
๐Ÿš€\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "accelerator", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.13" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Girish_Ramarao/README.md b/Girish_Ramarao/README.md deleted file mode 100644 index 9b3b82b..0000000 --- a/Girish_Ramarao/README.md +++ /dev/null @@ -1 +0,0 @@ -# Girish_Ramarao diff --git a/Guchi_Jaurre/README.md b/Guchi_Jaurre/README.md deleted file mode 100644 index 122abb7..0000000 --- a/Guchi_Jaurre/README.md +++ /dev/null @@ -1 +0,0 @@ -# Guchi_Jaurre diff --git a/Gururaja_Kalmanje/README.md b/Gururaja_Kalmanje/README.md deleted file mode 100644 index d67d15b..0000000 --- a/Gururaja_Kalmanje/README.md +++ /dev/null @@ -1 +0,0 @@ -# Gururaja_Kalmanje diff --git "a/IT_CARE_Pawe\305\202_\305\201abuz/README.md" "b/IT_CARE_Pawe\305\202_\305\201abuz/README.md" deleted file mode 100644 index 3532495..0000000 --- "a/IT_CARE_Pawe\305\202_\305\201abuz/README.md" +++ /dev/null @@ -1 +0,0 @@ -# IT_CARE_Paweล‚_ลabuz diff --git a/Ivan_Lewis/README.md b/Ivan_Lewis/README.md deleted file mode 100644 index f907631..0000000 --- a/Ivan_Lewis/README.md +++ /dev/null @@ -1 +0,0 @@ -# Ivan_Lewis diff --git a/Jatin_Garg/README.md b/Jatin_Garg/README.md deleted file mode 100644 index f5b73c2..0000000 --- a/Jatin_Garg/README.md +++ /dev/null @@ -1 +0,0 @@ -# Jatin_Garg diff --git a/Jayanthi_K/README.md b/Jayanthi_K/README.md deleted file mode 100644 index fcc8b56..0000000 --- a/Jayanthi_K/README.md +++ /dev/null @@ -1 +0,0 @@ -# Jayanthi_K diff --git a/Jaywant_D_Mahajan/README.md b/Jaywant_D_Mahajan/README.md deleted file mode 100644 index c764b12..0000000 --- a/Jaywant_D_Mahajan/README.md +++ /dev/null @@ -1 +0,0 @@ -# Jaywant_D_Mahajan 
diff --git a/Leonel_Vanegas/README.md b/Leonel_Vanegas/README.md deleted file mode 100644 index 3c3e438..0000000 --- a/Leonel_Vanegas/README.md +++ /dev/null @@ -1 +0,0 @@ -# Leonel_Vanegas diff --git a/M_MohanaVamsi/README.md b/M_MohanaVamsi/README.md deleted file mode 100644 index 55ef669..0000000 --- a/M_MohanaVamsi/README.md +++ /dev/null @@ -1 +0,0 @@ -# M_MohanaVamsi diff --git a/Mahender_Endarapu/README.md b/Mahender_Endarapu/README.md deleted file mode 100644 index d5e4116..0000000 --- a/Mahender_Endarapu/README.md +++ /dev/null @@ -1 +0,0 @@ -# Mahender_Endarapu diff --git a/Mamta_kumari/README.md b/Mamta_kumari/README.md deleted file mode 100644 index f09c82b..0000000 --- a/Mamta_kumari/README.md +++ /dev/null @@ -1 +0,0 @@ -# Mamta_kumari diff --git a/Manish_Kumar_Pathak/README.md b/Manish_Kumar_Pathak/README.md deleted file mode 100644 index 88df172..0000000 --- a/Manish_Kumar_Pathak/README.md +++ /dev/null @@ -1 +0,0 @@ -# Manish_Kumar_Pathak diff --git a/Manohar_Negi/README.md b/Manohar_Negi/README.md deleted file mode 100644 index 0d6b936..0000000 --- a/Manohar_Negi/README.md +++ /dev/null @@ -1 +0,0 @@ -# Manohar_Negi diff --git a/Maruti_Divekar/Day_03/README.md b/Maruti_Divekar/Day_03/README.md deleted file mode 100644 index 4911f76..0000000 --- a/Maruti_Divekar/Day_03/README.md +++ /dev/null @@ -1 +0,0 @@ -Maruti Divekar diff --git a/Maruti_Divekar/Day_03/Streamlit_App.jpg b/Maruti_Divekar/Day_03/Streamlit_App.jpg deleted file mode 100644 index 46929ce..0000000 Binary files a/Maruti_Divekar/Day_03/Streamlit_App.jpg and /dev/null differ diff --git a/Maruti_Divekar/Day_03/app.py b/Maruti_Divekar/Day_03/app.py deleted file mode 100644 index 580be9a..0000000 --- a/Maruti_Divekar/Day_03/app.py +++ /dev/null @@ -1,112 +0,0 @@ -import streamlit as st -from datetime import datetime -import time -import io - -st.set_page_config(page_title="Demo Assistant", layout="wide") - -# -------- Initialize State -------- -if "chat_history" not in st.session_state: - 
st.session_state.chat_history = [] -if "start_time" not in st.session_state: - st.session_state.start_time = time.time() -if "messages_sent" not in st.session_state: - st.session_state.messages_sent = 0 -if "show_dev_info" not in st.session_state: - st.session_state.show_dev_info = False - -# -------- Sidebar Configuration -------- -with st.sidebar: - st.title("๐Ÿ› ๏ธ Configuration") - st.header("Assistant Settings") - assistant_name = st.text_input("Assistant Name", value="Demo Assistant") - response_style = st.selectbox("Response Style", options=["Friendly", "Formal", "Concise"]) - st.header("Chat Settings") - max_history = st.slider("Max Chat History", 10, 100, 40) - show_timestamps = st.checkbox("Show Timestamps", value=True) - - st.header("Session Stats") - duration = int(time.time() - st.session_state.start_time) - minutes, seconds = divmod(duration, 60) - st.markdown(f"**Session Duration**: {minutes}m {seconds}s") - st.markdown(f"**Messages Sent**: {st.session_state.messages_sent}") - st.markdown(f"**Total Messages**: {len(st.session_state.chat_history)}") - - st.header("Actions") - col1, col2 = st.columns(2) - with col1: - if st.button("Clear Chat"): - st.session_state.chat_history = [] - st.session_state.messages_sent = 0 - st.session_state.start_time = time.time() - with col2: - chat_txt = "\n".join( - [f"[{msg['timestamp']}] {msg['sender']}: {msg['message']}" for msg in st.session_state.chat_history] - ) - st.download_button("Export Chat", data=chat_txt, file_name="chat_history.txt") - - -# -------- Main Chat UI -------- -st.markdown( - f"

๐Ÿš€ {assistant_name}

" - f"Response Style: {response_style} | History Limit: {max_history} messages", - unsafe_allow_html=True, -) -st.info("Hello! I'm your demo assistant. How can I help you today?") - -# -------- Chat History Cards -------- -for msg in st.session_state.chat_history[-max_history:]: - icon = "๐Ÿง‘" if msg["sender"] == "You" else "๐Ÿค–" - bg_col = "#222" if msg["sender"] == "You" else "#353a47" - text_col = "#fff" - sender_str = f"{icon} {msg['sender']}" - time_str = f"{msg['timestamp']}" if show_timestamps else "" - st.markdown( - f""" -
- {sender_str} {time_str}
{msg['message']} -
- """, unsafe_allow_html=True - ) - -# -------- Expanders and Dev Info -------- -with st.expander("About This Demo"): - st.write("This is a sample Streamlit Chat Assistant inspired by your frontend screenshot.") - -with st.expander("Instructor Notes"): - st.write("You can extend this code to include actual AI/model integration.") - -st.session_state.show_dev_info = st.checkbox("Show Development Info", value=st.session_state.show_dev_info) -if st.session_state.show_dev_info: - st.write(" ---- Development Info ---- ") - -# -------- Chat Input and Sending -------- -with st.form(key="chat_form", clear_on_submit=True): - message = st.text_input("Message", placeholder=f"Message {assistant_name}...") - submitted = st.form_submit_button("Send") - if submitted and message.strip(): - timestamp = datetime.now().strftime("%H:%M:%S") - # Add user message - st.session_state.chat_history.append({ - "sender": "You", - "message": message, - "timestamp": timestamp - }) - st.session_state.messages_sent += 1 - # Bot reply (demo logic) - bot_reply = ( - f"Hey, great question about '{message}'! " - f"I'm happy to help you with that. Here's what I'm thinking..." 
- ) - st.session_state.chat_history.append({ - "sender": assistant_name, - "message": bot_reply, - "timestamp": timestamp - }) - diff --git a/Maruti_Divekar/Day_04/README.MD b/Maruti_Divekar/Day_04/README.MD deleted file mode 100644 index 8b13789..0000000 --- a/Maruti_Divekar/Day_04/README.MD +++ /dev/null @@ -1 +0,0 @@ - diff --git a/Maruti_Divekar/Day_05/README.md b/Maruti_Divekar/Day_05/README.md deleted file mode 100644 index 8b13789..0000000 --- a/Maruti_Divekar/Day_05/README.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/Maruti_Divekar/Day_07/README.md b/Maruti_Divekar/Day_07/README.md deleted file mode 100644 index 8b13789..0000000 --- a/Maruti_Divekar/Day_07/README.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/Maruti_Divekar/README.md b/Maruti_Divekar/README.md deleted file mode 100644 index 92e084a..0000000 --- a/Maruti_Divekar/README.md +++ /dev/null @@ -1 +0,0 @@ -# Maruti_Divekar diff --git a/Mayur_Chaudhari/README.md b/Mayur_Chaudhari/README.md deleted file mode 100644 index 1a7947f..0000000 --- a/Mayur_Chaudhari/README.md +++ /dev/null @@ -1 +0,0 @@ -# Mayur_Chaudhari diff --git a/Michal_Maciejewski/README.md b/Michal_Maciejewski/README.md deleted file mode 100644 index 9c347f4..0000000 --- a/Michal_Maciejewski/README.md +++ /dev/null @@ -1 +0,0 @@ -# Michal_Maciejewski diff --git a/Mishi_Vidya_Sinku/README.md b/Mishi_Vidya_Sinku/README.md deleted file mode 100644 index 344f68d..0000000 --- a/Mishi_Vidya_Sinku/README.md +++ /dev/null @@ -1 +0,0 @@ -# Mishi_Vidya_Sinku diff --git a/Monalisa_Samal/Day_3_Completed_3_assignments/README.md b/Monalisa_Samal/Day_3_Completed_3_assignments/README.md deleted file mode 100644 index 8b13789..0000000 --- a/Monalisa_Samal/Day_3_Completed_3_assignments/README.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/Monalisa_Samal/Day_6_Completed_4_assignments/README.md b/Monalisa_Samal/Day_6_Completed_4_assignments/README.md deleted file mode 100644 index 2ebb74d..0000000 --- 
a/Monalisa_Samal/Day_6_Completed_4_assignments/README.md +++ /dev/null @@ -1 +0,0 @@ -Completed the 4 assigments of Day on RAG diff --git a/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_1_vector_db_basics.ipynb b/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_1_vector_db_basics.ipynb deleted file mode 100644 index ee1c097..0000000 --- a/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_1_vector_db_basics.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"rX4VtY8Gc4Aa"},"source":["# Assignment 1: Vector Database Creation and Retrieval\n","## Day 6 Session 2 - RAG Fundamentals\n","\n","**OBJECTIVE:** Create a vector database from a folder of documents and implement basic retrieval functionality.\n","\n","**LEARNING GOALS:**\n","- Understand document loading with SimpleDirectoryReader\n","- Learn vector store setup with LanceDB\n","- Implement vector index creation\n","- Perform semantic search and retrieval\n","\n","**DATASET:** Use the data folder in `Day_6/session_2/data/` which contains multiple file types\n","\n","**INSTRUCTIONS:**\n","1. Complete each function by replacing the TODO comments with actual implementation\n","2. Run each cell after completing the function to test it\n","3. 
The answers can be found in the existing notebooks in the `llamaindex_rag/` folder\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"mGtEGJgKdEty","executionInfo":{"status":"ok","timestamp":1762068951338,"user_tz":-330,"elapsed":31991,"user":{"displayName":"","userId":""}},"outputId":"3f9858b0-fd42-41ae-bddc-ed6cbd3baca5"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["# If it's in a specific folder (e.g., \"Projects/MyProject/\")\n","!pip install -r '/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt'"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"L4iAVZ0Yf0g-","executionInfo":{"status":"ok","timestamp":1762069356067,"user_tz":-330,"elapsed":49537,"user":{"displayName":"","userId":""}},"outputId":"809ee24f-a3e1-4d97-c939-fab5baf825ff"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in 
/usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 5)) (0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14))\n"," Downloading 
llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m15.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h 
Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m9.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: 
protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: 
packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already 
satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already 
satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from 
httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading 
wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 
in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from 
weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading 
llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata 
(3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 
12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m35.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl 
(4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m101.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m65.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m83.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m20.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m12.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m95.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m4.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m21.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m15.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m10.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading 
colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=be017bdd70010807e561385dfd0514f077b15200b867f9378992997bd48a9fba\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 
llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"bf6145c7209b49e19021b33f4c254a27"}},"metadata":{}}]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"gtmrrU8ic4Ad","executionInfo":{"status":"ok","timestamp":1762069483035,"user_tz":-330,"elapsed":51999,"user":{"displayName":"","userId":""}},"outputId":"eff993f8-ffe4-47bc-a240-6b7070bb2425"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Libraries imported successfully!\n"]}],"source":["# Import required libraries\n","import os\n","from pathlib import Path\n","from typing import List\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","\n","print(\"โœ… Libraries imported 
successfully!\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"IhBUV3rEc4Ae","executionInfo":{"status":"ok","timestamp":1762069961873,"user_tz":-330,"elapsed":2079,"user":{"displayName":"","userId":""}},"outputId":"7add2c19-d99f-49d6-e07d-e87cdc20e07d"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… OpenRouter API key found in Colab secrets\n","โœ… LlamaIndex configured with local embeddings\n"," Using BAAI/bge-small-en-v1.5 for document embeddings\n"]}],"source":["# Configure LlamaIndex Settings (Using OpenRouter - No OpenAI API Key needed)\n","# Configure LlamaIndex Settings (Using OpenRouter - No OpenAI API Key needed)\n","def setup_llamaindex_settings():\n"," \"\"\"\n"," Configure LlamaIndex with local embeddings and OpenRouter for LLM.\n"," This assignment focuses on vector database operations, so we'll use local embeddings only.\n"," \"\"\"\n"," # Check for OpenRouter API key (for future use, not needed for this basic assignment)\n"," from google.colab import userdata\n","\n"," try:\n"," api_key = userdata.get('OPEN_ROUTER') # or whatever you named your secret\n"," print(\"โœ… OpenRouter API key found in Colab secrets\")\n"," except Exception:\n"," print(\"โ„น๏ธ OPENROUTER_API_KEY not found - that's OK for this assignment!\")\n"," print(\" This assignment only uses local embeddings for vector operations.\")\n","\n"," # Configure local embeddings (no API key required)\n"," Settings.embed_model = HuggingFaceEmbedding(\n"," model_name=\"BAAI/bge-small-en-v1.5\",\n"," trust_remote_code=True\n"," )\n","\n"," print(\"โœ… LlamaIndex configured with local embeddings\")\n"," print(\" Using BAAI/bge-small-en-v1.5 for document embeddings\")\n","\n","# Setup the configuration\n","setup_llamaindex_settings()"]},{"cell_type":"markdown","metadata":{"id":"tgAR2pDkc4Af"},"source":["## 1. 
Document Loading Function\n","\n","Complete the function below to load documents from a folder using `SimpleDirectoryReader`.\n","\n","**Note:** This assignment uses local embeddings only - no OpenAI API key required! We're configured to use OpenRouter for future LLM operations.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"afKvr3CHc4Ag","executionInfo":{"status":"ok","timestamp":1762070845711,"user_tz":-330,"elapsed":65372,"user":{"displayName":"","userId":""}},"outputId":"440cfccb-19b3-4b8a-fa53-556cea45660f"},"outputs":[{"output_type":"stream","name":"stderr","text":["100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 139M/139M [00:00<00:00, 157MiB/s]\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n"]},{"output_type":"stream","name":"stdout","text":["TODO: Load documents from /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/data\n","Loaded 42 documents\n"]}],"source":["from llama_index.core import SimpleDirectoryReader\n","\n","def load_documents_from_folder(folder_path: str):\n"," \"\"\"\n"," Load documents from a folder using SimpleDirectoryReader.\n","\n"," TODO: Complete this function to load documents from the given folder path.\n"," HINT: Use SimpleDirectoryReader with recursive parameter to load all files\n","\n"," Args:\n"," folder_path 
(str): Path to the folder containing documents\n","\n"," Returns:\n"," List of documents loaded from the folder\n"," \"\"\"\n"," # TODO: Create SimpleDirectoryReader instance\n"," reader = SimpleDirectoryReader(input_dir=folder_path, recursive=True)\n","\n"," # TODO: Load and return documents\n"," documents = reader.load_data()\n","\n","\n"," # PLACEHOLDER - Replace with actual implementation\n"," print(f\"TODO: Load documents from {folder_path}\")\n","\n"," return documents\n","\n","# Test the function after you complete it\n","test_folder = \"/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/data\"\n","documents = load_documents_from_folder(test_folder)\n","print(f\"Loaded {len(documents)} documents\")\n"]},{"cell_type":"markdown","metadata":{"id":"DZ2L7z7ac4Ag"},"source":["## 2. Vector Store Creation Function\n","\n","Complete the function below to create a LanceDB vector store.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"m0KBU3uUc4Ah","executionInfo":{"status":"ok","timestamp":1762071429191,"user_tz":-330,"elapsed":323,"user":{"displayName":"","userId":""}},"outputId":"0af919ab-ec14-456e-dfff-f526b3e166b3"},"outputs":[{"output_type":"stream","name":"stderr","text":["WARNING:llama_index.vector_stores.lancedb.base:Table documents doesn't exist yet. 
Please add some data to create it.\n"]},{"output_type":"stream","name":"stdout","text":["TODO: Create vector store at ./assignment_vectordb\n","Vector store created: True\n"]}],"source":["def create_vector_store(db_path: str = \"./vectordb\", table_name: str = \"documents\"):\n"," \"\"\"\n"," Create a LanceDB vector store for storing document embeddings.\n","\n"," TODO: Complete this function to create and configure a LanceDB vector store.\n"," HINT: Use LanceDBVectorStore with uri and table_name parameters\n","\n"," Args:\n"," db_path (str): Path where the vector database will be stored\n"," table_name (str): Name of the table in the vector database\n","\n"," Returns:\n"," LanceDBVectorStore: Configured vector store\n"," \"\"\"\n"," # TODO: Create the directory if it doesn't exist\n"," Path(db_path).mkdir(parents=True, exist_ok=True)\n","\n"," # TODO: Create vector store\n"," vector_store = LanceDBVectorStore (\n"," uri=db_path,\n"," table_name=table_name\n"," )\n","\n"," # PLACEHOLDER - Replace with actual implementation\n"," print(f\"TODO: Create vector store at {db_path}\")\n","\n"," return vector_store\n","\n","\n","\n","# Test the function after you complete it\n","vector_store = create_vector_store(\"./assignment_vectordb\")\n","print(f\"Vector store created: {vector_store is not None}\")\n"]},{"cell_type":"markdown","metadata":{"id":"g3X5bjtIc4Ah"},"source":["## 3. 
Vector Index Creation Function\n","\n","Complete the function below to create a vector index from documents.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":116,"referenced_widgets":["c800f798befc49ec85ac21296ae3ede3","b6b184be73b64f52b5229efca5b7b535","a06c7b0401ab42658f5fceeb02719936","4c709bca2e5b45e18c1052b1443040b9","a2dd38f7ea6a4b0aaf4fa9d9d75bb2f7","7fb84c2a901a4d298d3733a3e3067a07","288422a6ec0d4266b9aa42f9a813fd1f","ed98766b22fb44f1a9b4099e6f5c6e8b","7381b62a3d3b4040adf365e00ce5d0ee","bc98f581514549cabc2bac83d017cf5a","f368138346d04fffb0a7fa1faa5986ec","9a3658c2ed064f579537fe277cc81243","5e1d55b3cf2a442e82108bedb94ac345","e0e0fef0e6934b728298d880f3fcfc24","d0dd192a6edb42d2ab6c3e1e96843a6a","2d06766fb0f14899a1c96b11bf04beb3","2b1872425a894d6298193dfa433fe76d","c50c88d75b9b41fdaa8d66fc24b3391f","2619ccd9ecb14392a78eb8469fd7de39","bef8337bdbcf451995c6ae2db1c94e68","b8a2dc32c1d44f3ebe8fa919aa69ccdc","4fb1db5731ed4e1dbf9f0c159febd03f"]},"id":"cDeK2JhSc4Ai","executionInfo":{"status":"ok","timestamp":1762071914576,"user_tz":-330,"elapsed":42292,"user":{"displayName":"","userId":""}},"outputId":"9cd990d6-1874-46ea-9a12-4e1afd55750a"},"outputs":[{"output_type":"display_data","data":{"text/plain":["Parsing nodes: 0%| | 0/42 [00:00 402\u001b[0;31m \u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraise_for_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 403\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mHTTPError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/requests/models.py\u001b[0m in \u001b[0;36mraise_for_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1025\u001b[0m \u001b[0;32mif\u001b[0m 
\u001b[0mhttp_error_msg\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1026\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mHTTPError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhttp_error_msg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1027\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mHTTPError\u001b[0m: 401 Client Error: Unauthorized for url: https://huggingface.co/api/whoami-v2","\nThe above exception was the direct cause of the following exception:\n","\u001b[0;31mHfHubHTTPError\u001b[0m Traceback (most recent call last)","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/hf_api.py\u001b[0m in \u001b[0;36mwhoami\u001b[0;34m(self, token)\u001b[0m\n\u001b[1;32m 1799\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1800\u001b[0;31m \u001b[0mhf_raise_for_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1801\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mHTTPError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_http.py\u001b[0m in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 474\u001b[0m \u001b[0;31m# as well (request id and/or server error message)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 475\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0m_format\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mHfHubHTTPError\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 476\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mHfHubHTTPError\u001b[0m: 401 Client Error: Unauthorized for url: https://huggingface.co/api/whoami-v2 (Request ID: Root=1-69075383-79acb4c72606ee2b66a2018e;4b91b747-f307-4671-9ee1-87bbb3f6fe74)\n\nInvalid credentials in Authorization header","\nThe above exception was the direct cause of the following exception:\n","\u001b[0;31mHTTPError\u001b[0m Traceback (most recent call last)","\u001b[0;32m/tmp/ipython-input-2265489888.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;31m# Use with Hugging Face libraries\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mhuggingface_hub\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mlogin\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0mlogin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtoken\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhf_token\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_deprecation.py\u001b[0m in \u001b[0;36minner_f\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 99\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;34m\"\\n\\n\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mcustom_message\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 100\u001b[0m \u001b[0mwarnings\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwarn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mFutureWarning\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 101\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 102\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0minner_f\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_deprecation.py\u001b[0m in \u001b[0;36minner_f\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0mextra_args\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mall_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mextra_args\u001b[0m \u001b[0;34m<=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 31\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 32\u001b[0m \u001b[0;31m# extra_args > 0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m args_msg = [\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/_login.py\u001b[0m in \u001b[0;36mlogin\u001b[0;34m(token, add_to_git_credential, new_session, write_permission)\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0;34m\"you want to set the 
git credential as well.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m )\n\u001b[0;32m--> 120\u001b[0;31m \u001b[0m_login\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtoken\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0madd_to_git_credential\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0madd_to_git_credential\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 121\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mis_notebook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 122\u001b[0m \u001b[0mnotebook_login\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnew_session\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnew_session\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/_login.py\u001b[0m in \u001b[0;36m_login\u001b[0;34m(token, add_to_git_credential)\u001b[0m\n\u001b[1;32m 396\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"You must use your personal account token, not an organization token.\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 398\u001b[0;31m \u001b[0mtoken_info\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwhoami\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtoken\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 399\u001b[0m \u001b[0mpermission\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtoken_info\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"auth\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"accessToken\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"role\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 400\u001b[0m 
\u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"Token is valid (permission: {permission}).\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_validators.py\u001b[0m in \u001b[0;36m_inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msmoothly_deprecate_use_auth_token\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhas_token\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhas_token\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 113\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 114\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 115\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_inner_fn\u001b[0m \u001b[0;31m# type: ignore\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/hf_api.py\u001b[0m in \u001b[0;36mwhoami\u001b[0;34m(self, token)\u001b[0m\n\u001b[1;32m 1812\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0meffective_token\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0m_get_token_from_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1813\u001b[0m \u001b[0merror_message\u001b[0m 
\u001b[0;34m+=\u001b[0m \u001b[0;34m\" The token stored is invalid. Please run `hf auth login` to update it.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1814\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mHTTPError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0merror_message\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrequest\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1815\u001b[0m \u001b[0;32mraise\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1816\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjson\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mHTTPError\u001b[0m: Invalid user token. The token from Google Colab vault is invalid. Please update it from the UI."]}]},{"cell_type":"markdown","metadata":{"id":"En2F-NY_c4Aj"},"source":["## 5. 
Final Test - Complete Pipeline\n","\n","Once you've completed all the functions above, run this cell to test the complete pipeline with multiple search queries.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":986,"referenced_widgets":["427e4b135571428fa45bbc1684856feb","6ee97068ef7b45e9a3886ab57b783a62","5eff01a40dbe40f3bd4b495fe52e6b36","8a7eb64cae22471787346d0b8fa86d25","6223159a14db42b5a8ad8ab9c6fc9b17","bf26c6f946a9479c89d02dedd73f6a20","8e98cab9140a4910a45033c3463eede1","8df3dcb6ea8f47f58c01f8d7417a35c7","ab59301ad2ea4f9bb90daf49d9c4e179","8e9f91584fa346449b267d3feb8d1de9","be0f0f881eca4f0799cbb541394b4867","56bf583baaf8410793f299b92f4e404a","bac0bf1f765a41819f13d92d130911fa","4c79b48a43b9486fa3fd89de29d1b585","a0c79394e0f74ce7a64274f8259ba07e","f28fcef72b384819853aab8ddc0fa588","3ce9e43b6a784ae0a90a0bb87f51d2c4","d3230baafa0a4e1f91f1ad9c11087b92","4a01d57a70a546c0b147a3021f1a4702","49dcb29cc0214e83ba0cc8ead707d6b7","94a9026cc60443be952cfd211f66950d","f93e7ec59b4b48beb1e5b15099d8cb8a"]},"id":"C4DysfYGc4Aj","executionInfo":{"status":"ok","timestamp":1762072280240,"user_tz":-330,"elapsed":83787,"user":{"displayName":"","userId":""}},"outputId":"0d6fc2fa-35ff-47f5-b267-1548e3c10ba9"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐Ÿš€ Testing Complete Vector Database Pipeline\n","==================================================\n","\n","๐Ÿ“‚ Step 1: Loading documents...\n"]},{"output_type":"stream","name":"stderr","text":["/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 
instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n"]},{"output_type":"stream","name":"stdout","text":["TODO: Load documents from /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/data\n"," Loaded 42 documents\n","\n","๐Ÿ—„๏ธ Step 2: Creating vector store...\n","TODO: Create vector store at ./assignment_vectordb\n"," Vector store status: โœ… Created\n","\n","๐Ÿ”— Step 3: Creating vector index...\n"]},{"output_type":"display_data","data":{"text/plain":["Parsing nodes: 0%| | 0/42 [00:001.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: 
python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (80.9.0)\n","Requirement already satisfied: jedi>=0.16 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.19.2)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) 
(2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.9.0)\n","Requirement already satisfied: deprecation in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (2.1.0)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (18.1.0)\n","Requirement already satisfied: lance-namespace>=0.0.16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (0.0.20)\n","Requirement already satisfied: llama-index-cli<0.6,>=0.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.5.3)\n","Requirement already satisfied: llama-index-core<0.15.0,>=0.14.7 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.14.7)\n","Requirement already satisfied: llama-index-indices-managed-llama-cloud>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.9.4)\n","Requirement already satisfied: llama-index-llms-openai<0.7,>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) 
(0.6.6)\n","Requirement already satisfied: llama-index-readers-file<0.6,>=0.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.5.4)\n","Requirement already satisfied: llama-index-readers-llama-parse>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.5.1)\n","Requirement already satisfied: pylance in /usr/local/lib/python3.12/dist-packages (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13)) (0.38.3)\n","Requirement already satisfied: tantivy in /usr/local/lib/python3.12/dist-packages (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13)) (0.25.0)\n","Requirement already satisfied: llama-index-llms-openai-like<0.6,>=0.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-llms-openrouter->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17)) (0.5.3)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages 
(from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages 
(from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) 
(0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.1)\n","Requirement already satisfied: lance-namespace-urllib3-client in /usr/local/lib/python3.12/dist-packages (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (0.0.20)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: aiosqlite in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.21.0)\n","Requirement already satisfied: banks<3,>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.2.0)\n","Requirement already satisfied: dataclasses-json in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.6.7)\n","Requirement already satisfied: deprecated>=1.2.9.3 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.18)\n","Requirement already satisfied: dirtyjson<2,>=1.0.8 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.0.8)\n","Requirement 
already satisfied: filetype<2,>=1.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.0)\n","Requirement already satisfied: llama-index-workflows!=2.9.0,<3,>=2 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.10.2)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (4.5.0)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (8.5.0)\n","Requirement already satisfied: typing-inspect>=0.8.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.9.0)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.17.3)\n","Requirement already satisfied: llama-cloud==0.1.35 in /usr/local/lib/python3.12/dist-packages (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.1.35)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.7.1)\n","Requirement already satisfied: pypdf<7,>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (6.1.3)\n","Requirement already satisfied: striprtf<0.0.27,>=0.0.26 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.0.26)\n","Requirement already satisfied: llama-parse>=0.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.6.54)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from 
pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in 
/usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.22.0)\n","Requirement already satisfied: griffe in /usr/local/lib/python3.12/dist-packages (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.14.0)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.1)\n","Requirement already satisfied: llama-index-instrumentation>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.4.2)\n","Requirement already satisfied: llama-cloud-services>=0.6.54 in 
/usr/local/lib/python3.12/dist-packages (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.6.54)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.3.0)\n","Requirement already satisfied: mypy-extensions>=0.3.0 in /usr/local/lib/python3.12/dist-packages (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.1.0)\n","Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /usr/local/lib/python3.12/dist-packages (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.26.1)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from 
markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: colorama>=0.4 in /usr/local/lib/python3.12/dist-packages (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.4.6)\n"]}]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"wlT1dnXjYchO","executionInfo":{"status":"ok","timestamp":1762089698115,"user_tz":-330,"elapsed":10,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"740c93db-ab39-4f37-edeb-93b9c8580141"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Advanced RAG libraries imported successfully!\n"]}],"source":["# Import required libraries for advanced RAG\n","import os\n","from pathlib import Path\n","from typing import Dict, List, Optional, Any\n","from pydantic import BaseModel, Field\n","\n","# Core LlamaIndex components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.core.query_engine import RetrieverQueryEngine\n","from llama_index.core.retrievers import VectorIndexRetriever\n","\n","# Vector store\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","\n","# Embeddings and LLM\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","# Advanced RAG components (we'll use these in the assignments)\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n","from llama_index.core.output_parsers import PydanticOutputParser\n","\n","print(\"โœ… Advanced RAG libraries imported 
successfully!\")\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"R_3uIzeMYchP","executionInfo":{"status":"ok","timestamp":1762089706600,"user_tz":-330,"elapsed":3209,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"42778bab-2cb4-44d2-a7c8-0d03aa468ae9"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… OpenRouter API key found in Colab secrets\n","โœ… Advanced RAG settings configured\n"," - Chunk size: 512 (optimized for precision)\n"," - Using local embeddings for cost efficiency\n"," - OpenRouter LLM ready for response synthesis\n"]}],"source":["# Configure Advanced RAG Settings (Using OpenRouter)\n","def setup_advanced_rag_settings():\n"," \"\"\"\n"," Configure LlamaIndex with optimized settings for advanced RAG.\n"," Uses local embeddings and OpenRouter for LLM operations.\n"," \"\"\"\n"," # Check for OpenRouter API key\n","\n"," from google.colab import userdata\n","\n"," try:\n"," api_key = userdata.get('OPEN_ROUTER') # your named your secret\n"," print(\"โœ… OpenRouter API key found in Colab secrets\")\n"," except Exception:\n"," print(\"โ„น๏ธ OPENROUTER_API_KEY not found - that's OK for this assignment!\")\n"," print(\" This assignment only uses local embeddings for vector operations.\")\n","\n","\n"," # Configure OpenRouter LLM\n"," Settings.llm = OpenRouter(\n"," api_key=api_key,\n"," model=\"gpt-4o\",\n"," temperature=0.1 # Lower temperature for more consistent responses\n"," )\n","\n"," # Configure local embeddings (no API key required)\n"," Settings.embed_model = HuggingFaceEmbedding(\n"," model_name=\"BAAI/bge-small-en-v1.5\",\n"," trust_remote_code=True\n"," )\n","\n"," # Advanced RAG configuration\n"," Settings.chunk_size = 512 # Smaller chunks for better precision\n"," Settings.chunk_overlap = 50\n","\n"," print(\"โœ… Advanced RAG settings configured\")\n"," print(\" - Chunk size: 512 (optimized for precision)\")\n"," print(\" - Using 
local embeddings for cost efficiency\")\n"," print(\" - OpenRouter LLM ready for response synthesis\")\n","\n","# Setup the configuration\n","setup_advanced_rag_settings()\n"]},{"cell_type":"code","source":["from google.colab import userdata\n","import os\n","\n","# Get the token from Colab secrets\n","hf_token = userdata.get('HF_TOKEN')\n","\n","# Set as environment variable (optional)\n","os.environ['HF_TOKEN'] = hf_token\n","\n","# Use with Hugging Face libraries\n","from huggingface_hub import login\n","login(token=hf_token)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"J7mg0QC3nmgs","executionInfo":{"status":"ok","timestamp":1762089712808,"user_tz":-330,"elapsed":899,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"4fc230c4-a763-4c1a-b640-85f2e1e67f42"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stderr","text":["Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just configured.\n","WARNING:huggingface_hub._login:Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just 
configured.\n"]}]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":255,"referenced_widgets":["a9d4ddd2138e497f9050b59d4a579687","28265196138e4fc2b71a6f28b6c9ae0c","a341f62d1b374c63b71f478afe421377","1a852a69c3274338bd6634fb0c16a457","d1a87baee2b34ff39294a9957609ae33","0b6a9975c3a84f048b3b34e678c1a3d8","70441a90759e419fbc118642ee3afeb8","e80346f88fa049f9bc4690b520faeadf","2607a80a19ef469a852a574dde7deec9","752bdeb9460947cdb9731e87828b93c7","a9a4ffd40f97484fa39374570f6fd006","18aba1e55c084b45a59d723c920649db","a8395d6fb02e49cb8062308a40db32b8","4d2373e1731b46f6960ae50e48ea2d33","cc18d8a0b8904643a0cd1f13b7ba8bd3","e051a3be3c35478cbd343dbacdd48acf","a011adb7c30245deb082532acfd038cb","4208e397a3a7458aa5b16c91bc7b5ac8","f1c05ad90ee74a1fbb77d618ff193971","656d18ae543a463ea98145be9cc6771e","6141e0010d114134ba65680c86be8720","c86f10069df241d3adfcf0f884a61389"]},"id":"IseBtxKeYchP","executionInfo":{"status":"ok","timestamp":1762089804808,"user_tz":-330,"elapsed":87443,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"467cb814-9deb-4ca1-a7ea-1d41a4dd38f0"},"outputs":[{"output_type":"stream","name":"stderr","text":["/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n"]},{"output_type":"display_data","data":{"text/plain":["Parsing nodes: 0%| | 0/42 [00:00=0.13.0 in /usr/local/lib/python3.12/dist-packages (from 
llama-index-llms-huggingface) (0.14.7)\n","Requirement already satisfied: torch<3,>=2.1.2 in /usr/local/lib/python3.12/dist-packages (from llama-index-llms-huggingface) (2.8.0+cu126)\n","Requirement already satisfied: transformers<5,>=4.37.0 in /usr/local/lib/python3.12/dist-packages (from transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (4.57.1)\n","Requirement already satisfied: aiohttp<4,>=3.8.6 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.13.1)\n","Requirement already satisfied: aiosqlite in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.21.0)\n","Requirement already satisfied: banks<3,>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.2.0)\n","Requirement already satisfied: dataclasses-json in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.6.7)\n","Requirement already satisfied: deprecated>=1.2.9.3 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.2.18)\n","Requirement already satisfied: dirtyjson<2,>=1.0.8 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.0.8)\n","Requirement already satisfied: filetype<2,>=1.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.2.0)\n","Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2025.3.0)\n","Requirement already satisfied: httpx in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.28.1)\n","Requirement already satisfied: llama-index-workflows!=2.9.0,<3,>=2 in 
/usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.10.2)\n","Requirement already satisfied: nest-asyncio<2,>=1.5.8 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.6.0)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.5)\n","Requirement already satisfied: nltk>3.8.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.0.2)\n","Requirement already satisfied: pillow>=9.0.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (11.3.0)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (4.5.0)\n","Requirement already satisfied: pydantic>=2.8.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.11.10)\n","Requirement already satisfied: pyyaml>=6.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (6.0.3)\n","Requirement already satisfied: requests>=2.31.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.32.4)\n","Requirement already satisfied: setuptools>=80.9.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (80.9.0)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from 
sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (8.5.0)\n","Requirement already satisfied: tiktoken>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.12.0)\n","Requirement already satisfied: tqdm<5,>=4.66.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (4.67.1)\n","Requirement already satisfied: typing-extensions>=4.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (4.15.0)\n","Requirement already satisfied: typing-inspect>=0.8.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.9.0)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.17.3)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (3.20.0)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (1.13.3)\n","Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (3.1.6)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.77)\n","Requirement already satisfied: 
nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from 
torch<3,>=2.1.2->llama-index-llms-huggingface) (1.11.1.6)\n","Requirement already satisfied: triton==3.4.0 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (3.4.0)\n","Requirement already satisfied: huggingface-hub<1.0,>=0.34.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (0.36.0)\n","Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (25.0)\n","Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (2024.11.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (0.6.2)\n","Requirement already satisfied: accelerate>=0.26.0 in /usr/local/lib/python3.12/dist-packages (from transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (1.11.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from accelerate>=0.26.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (5.9.5)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.4.0)\n","Requirement already satisfied: 
attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.22.0)\n","Requirement already satisfied: griffe in /usr/local/lib/python3.12/dist-packages (from banks<3,>=2.2.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.14.0)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<1.0,>=0.34.0->transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (1.2.0)\n","Requirement already satisfied: llama-index-instrumentation>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.4.2)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk>3.8.1->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk>3.8.1->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.5.2)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages 
(from pydantic>=2.8.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.8.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.8.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.4.2)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests>=2.31.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.4.4)\n","Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests>=2.31.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.11)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests>=2.31.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.5.0)\n","Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests>=2.31.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2025.10.5)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch<3,>=2.1.2->llama-index-llms-huggingface) (1.3.0)\n","Requirement already satisfied: mypy-extensions>=0.3.0 in /usr/local/lib/python3.12/dist-packages (from typing-inspect>=0.8.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.1.0)\n","Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /usr/local/lib/python3.12/dist-packages (from 
dataclasses-json->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.26.1)\n","Requirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (4.11.0)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.16.0)\n","Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch<3,>=2.1.2->llama-index-llms-huggingface) (3.0.3)\n","Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.12/dist-packages (from anyio->httpx->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.3.1)\n","Requirement already satisfied: colorama>=0.4 in /usr/local/lib/python3.12/dist-packages (from griffe->banks<3,>=2.2.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.4.6)\n"]}]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"NXdeLi2NYchP","executionInfo":{"status":"ok","timestamp":1762091882924,"user_tz":-330,"elapsed":2769,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"18aab0d1-d8e5-46de-853f-f1b47f858471"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… OpenAI API key loaded from Colab secrets\n","โœ… OpenAI LLM configured successfully\n","โœ… Query engine with similarity cutoff 0.3 created\n","โœ… Query engine with similarity filtering created\n","\n","๐Ÿ” Testing query: 'What are the benefits of AI agents?'\n","๐Ÿ“ Response: The benefits of AI agents include their enhanced reasoning, planning, and tool execution capabilities, which enable them to achieve complex goals efficiently. 
Additionally, AI agents can communicate effectively, adapt to different scenarios, and work collaboratively in both single-agent and multi-agent architectures.\n"]}],"source":["import os\n","from google.colab import userdata\n","from llama_index.llms.openai import OpenAI\n","from llama_index.core import Settings\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","\n","# Get OpenAI API key from Colab secrets\n","try:\n"," openai_api_key = userdata.get('OPENAI_API') # Your secret name\n"," os.environ[\"OPENAI_API_KEY\"] = openai_api_key # What OpenAI expects\n"," print(\"โœ… OpenAI API key loaded from Colab secrets\")\n","except Exception as e:\n"," print(f\"โŒ Error loading OpenAI API key from secrets: {e}\")\n"," print(\"๐Ÿ’ก Make sure you have added 'OPENAI_API' to your Colab secrets\")\n"," print(\" Go to the key icon (๐Ÿ”‘) in the left sidebar and add your key\")\n","\n","# Use OpenAI which handles long contexts much better\n","Settings.llm = OpenAI(model=\"gpt-3.5-turbo\", max_tokens=256)\n","print(\"โœ… OpenAI LLM configured successfully\")\n","\n","def create_query_engine_with_similarity_filter(index, similarity_cutoff: float = 0.3, top_k: int = 5):\n"," \"\"\"\n"," Create a query engine that filters results based on similarity scores.\n","\n"," TODO: Complete this function to create a query engine with similarity postprocessing.\n"," HINT: Use index.as_query_engine() with node_postprocessors parameter containing SimilarityPostprocessor\n","\n"," Args:\n"," index: Vector index to query\n"," similarity_cutoff: Minimum similarity score (0.0 to 1.0)\n"," top_k: Number of initial results to retrieve before filtering\n","\n"," Returns:\n"," Query engine with similarity filtering\n"," \"\"\"\n"," try:\n"," # TODO: Create similarity postprocessor with the cutoff threshold\n"," similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)\n","\n"," # TODO: Create query engine with similarity filtering\n"," query_engine = 
index.as_query_engine(\n"," similarity_top_k=top_k,\n"," node_postprocessors=[similarity_processor]\n"," )\n","\n"," print(f\"โœ… Query engine with similarity cutoff {similarity_cutoff} created\")\n"," return query_engine\n","\n"," except Exception as e:\n"," print(f\"โŒ Error creating query engine: {e}\")\n"," return None\n","\n","# Test the function with error handling\n","if 'index' in locals() and index:\n"," filtered_engine = create_query_engine_with_similarity_filter(index, similarity_cutoff=0.3, top_k=3)\n","\n"," if filtered_engine:\n"," print(\"โœ… Query engine with similarity filtering created\")\n","\n"," # Test query\n"," test_query = \"What are the benefits of AI agents?\"\n"," print(f\"\\n๐Ÿ” Testing query: '{test_query}'\")\n","\n"," try:\n"," response = filtered_engine.query(test_query)\n"," print(f\"๐Ÿ“ Response: {response}\")\n"," except Exception as e:\n"," print(f\"โŒ Error during query: {e}\")\n"," print(\"๐Ÿ’ก Try using a different model or check your data preprocessing\")\n"," else:\n"," print(\"โŒ Failed to create filtered query engine\")\n","else:\n"," print(\"โŒ No index available - run previous cells first\")"]},{"cell_type":"markdown","metadata":{"id":"X8qGszd0YchP"},"source":["## 2. Response Synthesizers - TreeSummarize\n","\n","**Concept:** Response synthesizers control how retrieved information becomes final answers. `TreeSummarize` builds responses hierarchically, ideal for complex analytical questions.\n","\n","**Why it matters:** Different synthesis strategies work better for different query types. 
TreeSummarize excels at comprehensive analysis and long-form responses.\n","\n","Complete the function below to create a query engine with TreeSummarize response synthesis.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"th0Kz2rMYchP","executionInfo":{"status":"ok","timestamp":1762092383190,"user_tz":-330,"elapsed":4006,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"b704bba9-0419-421a-d682-018420b6046e"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Create query engine with TreeSummarize synthesis\n","โœ… Query engine with TreeSummarize created\n","\n","๐Ÿ” Testing analytical query: 'Compare the advantages and disadvantages of different AI agent frameworks'\n","๐Ÿ“ TreeSummarize Response:\n","Advantages and disadvantages of different AI agent frameworks can be compared based on factors such as complexity, learning curve, best use case, performance considerations, and suitability for different tasks. Frameworks like LangChain offer a moderate complexity level and are suitable for general LLM applications, while AutoGPT is known for its high complexity and steep learning curve, making it ideal for autonomous tasks. CrewAI, on the other hand, has a medium complexity level with an easy learning curve, making it suitable for team collaboration. LlamaIndex stands out with low complexity and ease of use, making it a good fit for document Q&A tasks. Performance considerations show that single agents typically have lower latency compared to multi-agent systems, but the latter are often more accurate for complex tasks. However, more agents in a system can lead to higher API costs. In terms of reliability, simpler frameworks are generally more stable. 
When choosing the right framework, beginners may find LlamaIndex or simple LangChain suitable, while those tackling complex tasks may benefit from using AutoGPT or multi-agent systems.\n"]}],"source":["def create_query_engine_with_tree_summarize(index, top_k: int = 5):\n"," \"\"\"\n"," Create a query engine that uses TreeSummarize for comprehensive responses.\n","\n"," TODO: Complete this function to create a query engine with TreeSummarize synthesis.\n"," HINT: Create a TreeSummarize instance, then use index.as_query_engine() with response_synthesizer parameter\n","\n"," Args:\n"," index: Vector index to query\n"," top_k: Number of results to retrieve\n","\n"," Returns:\n"," Query engine with TreeSummarize synthesis\n"," \"\"\"\n"," # TODO: Create TreeSummarize response synthesizer\n"," tree_synthesizer =TreeSummarize()\n","\n"," # TODO: Create query engine with the synthesizer\n"," query_engine = index.as_query_engine(\n"," response_synthesizer=tree_synthesizer,\n"," similarity_top_k=top_k\n"," )\n","\n","\n","\n"," # PLACEHOLDER - Replace with actual implementation\n"," print(f\"โœ… Create query engine with TreeSummarize synthesis\")\n","\n"," return query_engine\n","\n","\n","# Test the function\n","if index:\n"," tree_engine = create_query_engine_with_tree_summarize(index)\n","\n"," if tree_engine:\n"," print(\"โœ… Query engine with TreeSummarize created\")\n","\n"," # Test with a complex analytical query\n"," analytical_query = \"Compare the advantages and disadvantages of different AI agent frameworks\"\n"," print(f\"\\n๐Ÿ” Testing analytical query: '{analytical_query}'\")\n","\n"," try:\n"," response = tree_engine.query(analytical_query)\n"," print(f\"๐Ÿ“ TreeSummarize Response:\\n{response}\")\n"," except Exception as e:\n"," print(f\"โŒ Error during query: {e}\")\n"," # Uncomment when implemented:\n","\n","\n","\n"," else:\n"," print(\"โŒ Failed to create TreeSummarize query engine\")\n","else:\n"," print(\"โŒ No index available - run previous cells 
first\")\n"]},{"cell_type":"markdown","metadata":{"id":"abAfqxgnYchQ"},"source":["## 3. Structured Outputs with Pydantic Models\n","\n","**Concept:** Structured outputs ensure predictable, parseable responses using Pydantic models. This is essential for API endpoints and data pipelines.\n","\n","**Why it matters:** Instead of free-text responses, you get type-safe, validated data structures that applications can reliably process.\n","\n","Complete the function below to create a structured output system for extracting research paper information.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"G0AwVrGwYchQ","executionInfo":{"status":"ok","timestamp":1762095388759,"user_tz":-330,"elapsed":1762,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"eb3b067d-9dcb-407b-da11-60369ca3032f"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ…: Create structured output program with ResearchPaperInfo\n","โœ… Structured output program created\n","\n","๐Ÿ” Testing structured query: 'Tell me about AI agents and their capabilities'\n","๐Ÿ“Š Structured Response:\n","title='AI Agents and Their Capabilities' key_points=['Architectures leveraging advanced techniques are more effective across various benchmarks and problem types', 'Current AI-driven agents show promise but have notable limitations and areas for improvement', 'Challenges around agent benchmarks, real-world applicability, and mitigating harmful biases need to be addressed for reliable agents'] applications=[] summary='The survey explores the progression from static language models to dynamic, autonomous agents, providing a comprehensive understanding of the current AI agent landscape and insights for developers.'\n"]}],"source":["# First, define the Pydantic models for structured outputs\n","class ResearchPaperInfo(BaseModel):\n"," \"\"\"Structured information about a research paper or AI concept.\"\"\"\n"," 
title: str = Field(description=\"The main title or concept name\")\n"," key_points: List[str] = Field(description=\"3-5 main points or findings\")\n"," applications: List[str] = Field(description=\"Practical applications or use cases\")\n"," summary: str = Field(description=\"Brief 2-3 sentence summary\")\n","\n","# Import the missing component\n","from llama_index.core.program import LLMTextCompletionProgram\n","\n","def create_structured_output_program(output_model: BaseModel = ResearchPaperInfo):\n"," \"\"\"\n"," Create a structured output program using Pydantic models.\n","\n"," TODO: Complete this function to create a structured output program.\n"," HINT: Use LLMTextCompletionProgram.from_defaults() with PydanticOutputParser and a prompt template\n","\n"," Args:\n"," output_model: Pydantic model class for structured output\n","\n"," Returns:\n"," LLMTextCompletionProgram that returns structured data\n"," \"\"\"\n"," # TODO: Create output parser with the Pydantic model\n"," output_parser = PydanticOutputParser(output_model)\n","\n"," # TODO: Create the structured output program\n"," program = LLMTextCompletionProgram.from_defaults(\n"," output_parser=output_parser,\n"," prompt_template_str=(\n"," \"Extract structured information from the following context:\\n\"\n"," \"{context}\\n\\n\"\n"," \"Question: {query}\\n\\n\"\n"," \"Provide the information in the specified JSON format.\"\n"," )\n"," )\n","\n"," print(f\"โœ…: Create structured output program with {output_model.__name__}\")\n","\n"," return program\n","\n","\n","\n","# Test the function\n","if index:\n"," structured_program = create_structured_output_program(ResearchPaperInfo)\n","\n"," if structured_program:\n"," print(\"โœ… Structured output program created\")\n","\n"," # Test with retrieval and structured extraction\n"," structure_query = \"Tell me about AI agents and their capabilities\"\n"," print(f\"\\n๐Ÿ” Testing structured query: '{structure_query}'\")\n","\n"," # Get context for structured 
extraction (Uncomment when implemented)\n"," retriever = VectorIndexRetriever(index=index, similarity_top_k=3)\n"," nodes = retriever.retrieve(structure_query)\n"," context = \"\\n\".join([node.text for node in nodes])\n","\n","\n"," response = structured_program(context=context, query=structure_query)\n"," print(f\"๐Ÿ“Š Structured Response:\\n{response}\")\n","\n"," else:\n"," print(\"โŒ Failed to create structured output program\")\n","else:\n"," print(\"โŒ No index available - run previous cells first\")\n"]},{"cell_type":"markdown","metadata":{"id":"cuS2ueNtYchQ"},"source":["## 4. Advanced Pipeline - Combining All Techniques\n","\n","**Concept:** Combine multiple advanced techniques into a single powerful query engine: similarity filtering + response synthesis + structured output.\n","\n","**Why it matters:** Production RAG systems often need multiple techniques working together for optimal results.\n","\n","Complete the function below to create a comprehensive advanced RAG pipeline.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"zsKSzjscYchQ","executionInfo":{"status":"ok","timestamp":1762095552846,"user_tz":-330,"elapsed":2667,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"f16ed629-552a-47e5-8e66-8f0cde752139"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… : Create advanced RAG pipeline with all techniques\n","โœ… Advanced RAG pipeline created successfully!\n"," ๐Ÿ”ง Similarity filtering: โœ…\n"," ๐ŸŒณ TreeSummarize synthesis: โœ…\n","\n","๐Ÿ” Testing complex query: 'Analyze the current state and future potential of AI agent technologies'\n","๐Ÿš€ Advanced RAG Response:\n","The current state of AI agent technologies shows promising advancements in achieving complex goals that require enhanced reasoning, planning, and tool execution capabilities. 
Architectures leveraging these techniques have demonstrated effectiveness across various benchmarks and problem types. However, there are notable limitations that need to be addressed for future improvement. Challenges such as comprehensive agent benchmarks, real-world applicability, and mitigating harmful biases in language models are areas that require attention in the near term to enable the development of reliable agents. By transitioning from static language models to more dynamic, autonomous agents, the AI agent landscape is evolving towards more robust and capable systems. This progression aims to provide a holistic understanding of existing AI agent architectures and offers valuable insights for those involved in building or customizing agent systems.\n"," (Complete the function above to test the full pipeline)\n","\n","๐ŸŽฏ This should provide:\n"," - Filtered relevant results only\n"," - Comprehensive analytical response\n"," - Combined postprocessing and synthesis\n"]}],"source":["def create_advanced_rag_pipeline(index, similarity_cutoff: float = 0.3, top_k: int = 5):\n"," \"\"\"\n"," Create a comprehensive advanced RAG pipeline combining multiple techniques.\n","\n"," TODO: Complete this function to create the ultimate advanced RAG query engine.\n"," HINT: Combine SimilarityPostprocessor + TreeSummarize using index.as_query_engine()\n","\n"," Args:\n"," index: Vector index to query\n"," similarity_cutoff: Minimum similarity score for filtering\n"," top_k: Number of initial results to retrieve\n","\n"," Returns:\n"," Advanced query engine with filtering and synthesis combined\n"," \"\"\"\n"," # TODO: Create similarity postprocessor\n"," similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)\n","\n"," # TODO: Create TreeSummarize for comprehensive responses\n"," tree_synthesizer = TreeSummarize()\n","\n"," # TODO: Create the comprehensive query engine combining both techniques\n"," advanced_engine = index.as_query_engine(\n"," 
response_synthesizer=tree_synthesizer,\n"," node_postprocessors=[similarity_processor],\n"," similarity_top_k=top_k\n"," )\n","\n"," print(f\"โœ… : Create advanced RAG pipeline with all techniques\")\n","\n"," return advanced_engine\n","\n"," # PLACEHOLDER - Replace with actual implementation\n","\n"," #return None\n","\n","# Test the comprehensive pipeline\n","if index:\n"," advanced_pipeline = create_advanced_rag_pipeline(index)\n","\n"," if advanced_pipeline:\n"," print(\"โœ… Advanced RAG pipeline created successfully!\")\n"," print(\" ๐Ÿ”ง Similarity filtering: โœ…\")\n"," print(\" ๐ŸŒณ TreeSummarize synthesis: โœ…\")\n","\n"," # Test with complex query\n"," complex_query = \"Analyze the current state and future potential of AI agent technologies\"\n"," print(f\"\\n๐Ÿ” Testing complex query: '{complex_query}'\")\n","\n"," # Uncomment when implemented:\n"," response = advanced_pipeline.query(complex_query)\n"," print(f\"๐Ÿš€ Advanced RAG Response:\\n{response}\")\n"," print(\" (Complete the function above to test the full pipeline)\")\n","\n"," print(\"\\n๐ŸŽฏ This should provide:\")\n"," print(\" - Filtered relevant results only\")\n"," print(\" - Comprehensive analytical response\")\n"," print(\" - Combined postprocessing and synthesis\")\n"," else:\n"," print(\"โŒ Failed to create advanced RAG pipeline\")\n","else:\n"," print(\"โŒ No index available - run previous cells first\")\n"]},{"cell_type":"markdown","metadata":{"id":"YpIQsi_bYchQ"},"source":["## 5. 
Final Test - Compare Basic vs Advanced RAG\n","\n","Once you've completed all the functions above, run this cell to compare basic RAG with your advanced techniques.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"cOm-hv1cYchQ","executionInfo":{"status":"ok","timestamp":1762095884735,"user_tz":-330,"elapsed":10556,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"34e01e76-4134-4dcc-eba2-bb69ebb67a82"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐Ÿš€ Advanced RAG Techniques Assignment - Final Test\n","============================================================\n","\n","๐Ÿ“Š Component Status:\n"," โœ… Basic Index\n"," โœ… Similarity Filter\n"," โœ… TreeSummarize\n"," โœ… Structured Output\n"," โœ… Advanced Pipeline\n","\n","๐Ÿ” Creating basic query engine for comparison...\n","\n","============================================================\n","๐Ÿ†š COMPARISON: Basic vs Advanced RAG\n","============================================================\n","\n","๐Ÿ“‹ Test Query 1: 'What are the key capabilities of AI agents?'\n","--------------------------------------------------\n","๐Ÿ”น Basic RAG:\n"," Response: The key capabilities of AI agents include strong performance on complex tasks involving reasoning and tool execution, the ability to work iteratively towards goals, opportunities for human feedback, c...\n","\n","๐Ÿ”ธ Advanced RAG:\n"," Response: The key capabilities of AI agents include strong performance on complex tasks involving reasoning and tool execution, the ability to work iteratively towards goals, opportunities for human feedback, clear leadership, defined planning phases with opportunities for plan refinement, intelligent message filtering, and dynamic teams with agents possessing specific skills relevant to the current sub-task. 
These capabilities contribute to increased performance compared to architectures lacking these elements.\n","\n","๐Ÿ“‹ Test Query 2: 'How do you evaluate agent performance metrics?'\n","--------------------------------------------------\n","๐Ÿ”น Basic RAG:\n"," Response: By considering objective evaluation metrics like success rate, output similarity to human responses, and overall efficiency, as well as more nuanced or subjective measures such as efficiency of tool u...\n","\n","๐Ÿ”ธ Advanced RAG:\n"," Response: Evaluate agent performance metrics by considering objective evaluation metrics like success rate, output similarity to human responses, and overall efficiency. It is also important to take into account more nuanced or subjective measures of performance such as efficiency of tool use, reliability, and robustness of planning. Additionally, real-world applicability should be assessed by evaluating performance on tasks that cover a wide breadth of topics and are sourced from real conversations or issues, rather than just logic puzzles or video games.\n","\n","๐Ÿ“‹ Test Query 3: 'Explain the benefits and challenges of multimodal AI systems'\n","--------------------------------------------------\n","๐Ÿ”น Basic RAG:\n"," Response: Multimodal AI systems offer the advantage of combining multiple modes of input, such as text, images, and speech, to enhance understanding and improve performance on various tasks. By leveraging diffe...\n","\n","๐Ÿ”ธ Advanced RAG:\n"," Response: Multimodal AI systems offer the advantage of combining different modalities such as text, images, and speech to enhance understanding and performance in various tasks. By leveraging multiple modalities, these systems can provide more comprehensive and nuanced insights, leading to improved accuracy and effectiveness in tasks that require multimodal input. However, challenges may arise in multimodal AI systems related to data integration, model complexity, and computational resources. 
Coordinating information from different modalities, ensuring alignment between them, and managing the increased complexity of multimodal models can be demanding tasks. Additionally, training and deploying multimodal AI systems may require more computational resources compared to unimodal systems, potentially leading to longer processing times and higher resource consumption.\n","\n","============================================================\n","๐ŸŽฏ Assignment Status:\n"," Completed: 5/5 components\n","\n","๐ŸŽ‰ Congratulations! You've mastered Advanced RAG Techniques!\n"," โœ… Node postprocessors for result filtering\n"," โœ… Response synthesizers for better answers\n"," โœ… Structured outputs for reliable data\n"," โœ… Advanced pipelines combining all techniques\n","\n","๐Ÿš€ You're ready for production RAG systems!\n","\n","๐Ÿ’ก Key learnings:\n"," - Postprocessors improve result relevance and precision\n"," - Different synthesizers work better for different query types\n"," - Structured outputs enable reliable system integration\n"," - Advanced techniques can be combined for production systems\n"]}],"source":["# Final comparison: Basic vs Advanced RAG\n","print(\"๐Ÿš€ Advanced RAG Techniques Assignment - Final Test\")\n","print(\"=\" * 60)\n","\n","# Test queries for comparison\n","test_queries = [\n"," \"What are the key capabilities of AI agents?\",\n"," \"How do you evaluate agent performance metrics?\",\n"," \"Explain the benefits and challenges of multimodal AI systems\"\n","]\n","\n","# Check if all components were created\n","components_status = {\n"," \"Basic Index\": index is not None,\n"," \"Similarity Filter\": 'filtered_engine' in locals() and filtered_engine is not None,\n"," \"TreeSummarize\": 'tree_engine' in locals() and tree_engine is not None,\n"," \"Structured Output\": 'structured_program' in locals() and structured_program is not None,\n"," \"Advanced Pipeline\": 'advanced_pipeline' in locals() and advanced_pipeline is not 
None\n","}\n","\n","print(\"\\n๐Ÿ“Š Component Status:\")\n","for component, status in components_status.items():\n"," status_icon = \"โœ…\" if status else \"โŒ\"\n"," print(f\" {status_icon} {component}\")\n","\n","# Create basic query engine for comparison\n","if index:\n"," print(\"\\n๐Ÿ” Creating basic query engine for comparison...\")\n"," basic_engine = index.as_query_engine(similarity_top_k=5)\n","\n"," print(\"\\n\" + \"=\" * 60)\n"," print(\"๐Ÿ†š COMPARISON: Basic vs Advanced RAG\")\n"," print(\"=\" * 60)\n","\n"," for i, query in enumerate(test_queries, 1):\n"," print(f\"\\n๐Ÿ“‹ Test Query {i}: '{query}'\")\n"," print(\"-\" * 50)\n","\n"," # Basic RAG\n"," print(\"๐Ÿ”น Basic RAG:\")\n"," if basic_engine:\n"," # Uncomment when testing:\n"," basic_response = basic_engine.query(query)\n"," print(f\" Response: {str(basic_response)[:200]}...\")\n"," #print(\" (Standard vector search + simple response)\")\n","\n"," # Advanced RAG (if implemented)\n"," print(\"\\n๐Ÿ”ธ Advanced RAG:\")\n"," if components_status[\"Advanced Pipeline\"]:\n"," # Uncomment when testing:\n"," advanced_response = advanced_pipeline.query(query)\n"," print(f\" Response: {advanced_response}\")\n"," #print(\" (Filtered + TreeSummarize + Structured output)\")\n"," else:\n"," print(\" Complete the advanced pipeline function to test\")\n","\n","# Final status\n","print(\"\\n\" + \"=\" * 60)\n","print(\"๐ŸŽฏ Assignment Status:\")\n","completed_count = sum(components_status.values())\n","total_count = len(components_status)\n","\n","print(f\" Completed: {completed_count}/{total_count} components\")\n","\n","if completed_count == total_count:\n"," print(\"\\n๐ŸŽ‰ Congratulations! 
You've mastered Advanced RAG Techniques!\")\n"," print(\" โœ… Node postprocessors for result filtering\")\n"," print(\" โœ… Response synthesizers for better answers\")\n"," print(\" โœ… Structured outputs for reliable data\")\n"," print(\" โœ… Advanced pipelines combining all techniques\")\n"," print(\"\\n๐Ÿš€ You're ready for production RAG systems!\")\n","else:\n"," missing = total_count - completed_count\n"," print(f\"\\n๐Ÿ“ Complete {missing} more components to finish the assignment:\")\n"," for component, status in components_status.items():\n"," if not status:\n"," print(f\" - {component}\")\n","\n","print(\"\\n๐Ÿ’ก Key learnings:\")\n","print(\" - Postprocessors improve result relevance and precision\")\n","print(\" - Different synthesizers work better for different query types\")\n","print(\" - Structured outputs enable reliable system integration\")\n","print(\" - Advanced techniques can be combined for production systems\")\n"]}],"metadata":{"kernelspec":{"display_name":"accelerator","language":"python","name":"python3"},"language_info":{"name":"python","version":"3.11.13"},"colab":{"provenance":[]},"widgets":{"application/vnd.jupyter.widget-state+json":{"a9d4ddd2138e497f9050b59d4a579687":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_28265196138e4fc2b71a6f28b6c9ae0c","IPY_MODEL_a341f62d1b374c63b71f478afe421377","IPY_MODEL_1a852a69c3274338bd6634fb0c16a457"],"layout":"IPY_MODEL_d1a87baee2b34ff39294a9957609ae33"}},"28265196138e4fc2b71a6f28b6c9ae0c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_mo
dule_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0b6a9975c3a84f048b3b34e678c1a3d8","placeholder":"โ€‹","style":"IPY_MODEL_70441a90759e419fbc118642ee3afeb8","value":"Parsingโ€‡nodes:โ€‡100%"}},"a341f62d1b374c63b71f478afe421377":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_e80346f88fa049f9bc4690b520faeadf","max":42,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2607a80a19ef469a852a574dde7deec9","value":42}},"1a852a69c3274338bd6634fb0c16a457":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_752bdeb9460947cdb9731e87828b93c7","placeholder":"โ€‹","style":"IPY_MODEL_a9a4ffd40f97484fa39374570f6fd006","value":"โ€‡42/42โ€‡[00:00<00:00,โ€‡215.18it/s]"}},"d1a87baee2b34ff39294a9957609ae33":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":
null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0b6a9975c3a84f048b3b34e678c1a3d8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"70441a90759e419fbc118642ee3afeb8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-wi
dgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e80346f88fa049f9bc4690b520faeadf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2607a80a19ef469a852a574dde7deec9":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"752bdeb9460947cdb9731e87828b93c7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"displ
ay":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a9a4ffd40f97484fa39374570f6fd006":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"18aba1e55c084b45a59d723c920649db":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a8395d6fb02e49cb8062308a40db32b8","IPY_MODEL_4d2373e1731b46f6960ae50e48ea2d33","IPY_MODEL_cc18d8a0b8904643a0cd1f13b7ba8bd3"],"layout":"IPY_MODEL_e051a3be3c35478cbd343dbacdd48acf"}},"a8395d6fb02e49cb8062308a40db32b8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","des
cription_tooltip":null,"layout":"IPY_MODEL_a011adb7c30245deb082532acfd038cb","placeholder":"โ€‹","style":"IPY_MODEL_4208e397a3a7458aa5b16c91bc7b5ac8","value":"Generatingโ€‡embeddings:โ€‡100%"}},"4d2373e1731b46f6960ae50e48ea2d33":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_f1c05ad90ee74a1fbb77d618ff193971","max":93,"min":0,"orientation":"horizontal","style":"IPY_MODEL_656d18ae543a463ea98145be9cc6771e","value":93}},"cc18d8a0b8904643a0cd1f13b7ba8bd3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6141e0010d114134ba65680c86be8720","placeholder":"โ€‹","style":"IPY_MODEL_c86f10069df241d3adfcf0f884a61389","value":"โ€‡93/93โ€‡[00:53<00:00,โ€‡โ€‡1.82it/s]"}},"e051a3be3c35478cbd343dbacdd48acf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":n
ull,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a011adb7c30245deb082532acfd038cb":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4208e397a3a7458aa5b16c91bc7b5ac8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f1c05ad90ee74a1fbb77d618ff193971":{"model_module":"@jupyter-widgets/base","model_na
me":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"656d18ae543a463ea98145be9cc6771e":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"6141e0010d114134ba65680c86be8720":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"g
rid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c86f10069df241d3adfcf0f884a61389":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_3a_basic_gradio_rag.ipynb b/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_3a_basic_gradio_rag.ipynb deleted file mode 100644 index 5c35784..0000000 --- a/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_3a_basic_gradio_rag.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"sFWLqj78Hkz3"},"source":["# Assignment 3a: Basic Gradio RAG Frontend\n","## Day 6 Session 2 - Building Simple RAG Applications\n","\n","In this assignment, you'll build a simple Gradio frontend for your RAG system with just the essential features:\n","- Button to initialize the vector database\n","- Search query input and button\n","- Display of AI responses\n","\n","**Learning Objectives:**\n","- Create basic Gradio interfaces\n","- Connect RAG backend to frontend\n","- Handle user interactions and database initialization\n","- Build functional AI-powered web applications\n","\n","**Prerequisites:**\n","- Completed Assignment 1 (Vector Database Basics)\n","- Completed Assignment 2 (Advanced RAG)\n","- 
Understanding of LlamaIndex fundamentals\n"]},{"cell_type":"markdown","metadata":{"id":"ZnOQlWbVHkz5"},"source":["## ๐Ÿ“š Part 1: Setup and Imports\n","\n","Import all necessary libraries for building your Gradio RAG application.\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"vvsKCOpDHxNV","executionInfo":{"status":"ok","timestamp":1762098161984,"user_tz":-330,"elapsed":13088,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"4ad37e51-53b9-45d9-d750-855729cd889d"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["# If it's in a specific folder (e.g., \"Projects/MyProject/\")\n","!pip install -r '/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt'"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"0dIMu0onUfQZ","executionInfo":{"status":"ok","timestamp":1762099657508,"user_tz":-330,"elapsed":56094,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"7d2de8f7-b3f8-4a2d-da92-2192541a3330"},"execution_count":3,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in 
/usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 5)) (0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting 
llama-index-embeddings-huggingface (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m10.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m15.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 
in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already 
satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement 
already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from 
llama-index-llms-openrouter->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from 
httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 
12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in 
/usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: 
smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from 
aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from 
rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading 
llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata 
(3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 
12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m26.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl 
(4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m92.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m67.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m104.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m22.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m16.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m96.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m26.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m19.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m4.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m12.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading 
colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=ddcea9f4d4fb0e3627f63f4273e44a3f3c30abbf190c69dfcc64f51f352d3803\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 
llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"ae862be1c9ae416ab3c67ffa196e0647"}},"metadata":{}}]},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"n9bNl5sSHkz5","executionInfo":{"status":"ok","timestamp":1762099739954,"user_tz":-330,"elapsed":46928,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"7a060eab-1636-4fd2-95ed-e1784d9bace9"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… All libraries imported successfully!\n"]}],"source":["# Import required libraries\n","import gradio as gr\n","import os\n","from pathlib import Path\n","\n","# LlamaIndex components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","print(\"โœ… All libraries imported successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"YgGcrP-0Hkz6"},"source":["## ๐Ÿค– Part 2: RAG Backend Class\n","\n","Create a simple RAG backend that can initialize the database and answer 
queries.\n"]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":386,"referenced_widgets":["29c8526118734cdfb4e39787887b3c8d","8adf73dc0ed649a5a8c5a5d9025e48ff","2fe46a51fb8144f1bda9f7a291088063","ac01b9c0a1f642feb84b714053ba82fd","5380a2c612934f608b72ab08f38606d3","f848556fe0254588a75c886a9d8f2eea","4d2cd19e968d42a5aeeca9e450d1bd8b","64ac224325924d94b411ee2848bab330","d4f769a917124dc4b807dc9175c986e7","4b350e017a5d4695905ee915f7d28e51","7dd16942c28c4f14808cdbcf859c21f2","23a2f365cc6f4f8ebc5fdacdf77b7005","294103a64b724f1bb2d040ed39e741e2","ab3a38c9dd974da787b6c5ef7ed47b0b","fbf20aad929a4055b71784bbbe47e48e","54c77334589a4ca28c74dd164fa4c811","46cee2f68aba4304af164cfd8f03a27b","46ac5f7f07f54b9cac4df267afdea68d","c6af8202f2ec44c88bdf9b7509636ad2","b87a4b50638c487abf04b62651fc1ecf","af461b7a4bca42a4bc803234b85adc38","cdee1e8d4ae645cdacd7844a35129b29","56efd96439544945bcfca3e79cbb1a29","2afb8c115dfd492aa81ea3208fc6371b","3fca0ca9cbd7429f91aca4288f4c232b","af6cb5768d1244f0861609db8181e1e0","db890f449baf471fa1730e0f49a65894","e7b24e6d82b84fe3a4ed97aab23487af","88ba09d1e813487598ff3fe92a87b672","5f3109bfe82948e9a85cf51c49d93846","f78db77bab864007b6f85443efec8bbb","36e4e1bd96f042e5ad186b304f1d716c","11ab2513be86450d8f17eff3fcac390f","80f64e3d3ea14fee938443de745a4cc2","464a7aa64455427a8b241987d40575f2","ca08abbe9fde41d9871b5ab91535ba88","50ad34e4863d4f3ba07f18185521f42c","6f3b172cb35e447f961bd0978442bf0e","3babe7706f5949afbfa3757b0bfbbaf4","25526400e1604aada1f5e6bda33a9089","c471b58ef4504e248e056fbe4a262281","dee29c448abb430e925c366228d74bed","2de1b5901612494f87b0ca0fed9b66d3","88dbdbdea2354247ad95e67a1090a190","594df45c0a094b68a268b0d5412af94d","e6986b93f60f421e94dd37ab830e0de3","3bac9dfe41604d0583bdca6f932033b8","30214e34272040679dc5fa15f720064f","0229d2ab797a4dffac04e96d6b08e699","456146dfda614fdf900fa253411f2643","b26b541ab9d3415cb742df152837a21a","7c4f7c59205741c09bb0c45a2ee079fe","5d4b6948dbbe487393d04ec5a966fbec
","bbb4be2fa3a04ce99b1e93d5c447d7d6","0025ea31940e419b828376df6b1cc37b","095b448393d44d8a87995166e71f5f95","63899673860b4061886ae8d7b5a03713","3cda42948f734557b95e86700cc19270","3e5402451c724a5a82754985a18754ce","9a1d3ffc42094c04b9ffa54b350c1804","31f349efe39d4d9ca69fb36ddc712d3f","b7f1eed940a545c09e6c496e2aae44d2","38c8fb204aa4425d8e4a51258ae164ca","36b571f73d0e41f888f9e2ac882b6675","5cd90753e88945db85ebd4047c5f1089","7ab7fb0dabd2431b8da42f29112ad550","73fed5108a93410aa40e387264255160","da14e21da45546abb098e700e5e60a33","c8f9b4a9134c47929021528fc8f64f71","0c5f353360e04cc6af1f2487fc30b3ff","34aa9d04461b4e919d07830d23f9fda1","651dcd57e8aa45fa89268841297ae6a3","526bf032890a4256aefc8d50cc7d8371","bdb8cfc7dfc242799a40586df8b91d6e","82b23de6ea3f46cd984e90b3ab9f3eec","769d61e92083433aac21e9f4428a30e3","fdfdbff8f89f49b387ddb0e4061afddf","dbb0169d244247b59d4f972e34f07d57","007c89344ae949d9b6aafc8c14bcfe88","d85e273c60b649f89d1a34cc8c6b8933","0b6ef9ec809f40f8a9ba79a0f7e13dfc","3e53ee3f24d64bc7a524ce7e78c29f17","52389e6daed54e5d854b20217b53e62d","82d09953a81741b3b6f799ce907f7739","a6759f8c292d4927b424ff16987d120e","ed6c2f99446c49eba1b026cb1dcdd991","fdae79be31cc471ca7d2a3d830b937fe","30494ca740bb48708a8a5d8f46960cee","c94a678808f649e09449d5e0c1610998","243c57bc25b34a3c93c038995dabb6c8","d35a8e85ebc24775923dcb6fc8bf3b94","6f7bbd2e653a40eb82fb35a9465d8aa9","98461d6a77d5473d94da2a21976125f2","87b2133871ef4e7cab1bde01b0a0f638","6067c0a455e74365ac64b9d6d197b415","299b4358ad4c4dff91b3714c90ee2a2b","235e44623aac4ca684a05cf615e09e7f","0060126456ba450bb5994823c19a8fe0","dee0a0429ea140d2b94c01efffe6834b","f1c8234481fe42e488d6d94fc0bc04b3","a50910a2209c462bbdbd8c6879221290","ec48f5c6933444cdad1b8acf78f4ecdf","1475e0bfd0e448cab6d9e5946baf87db","ae00ff78797446818d0a01ebf2827171","90a843cf4afb4fc5a0e393ece36cac50","deb3da53beb5450f8369e6780aeeb918","6b0b8031053a4d669fbc4d5d40b228e3","45dcadbbcc014b7398c33f65f51ed55a","1f0817d471364e16aa312e55ff02e05b","211a9a84c96e431d8e3e28cc91036e93","c4
fc0f1e4e9b4b239867d23d2b4bb700","7051d3fd92b5498597da75e856b6ccf0","ebfa88ffe54f41fc8ca89f5f65664a91","3e92ee759cd74c4a8d97e5ef5c0de5aa","14b8e6e1560d445e9634b103b408001e","8cc241c20c8e49c69f97b2b30ea07192","aaf21bb0f9a047869f2e4c409e507f56","c88f0ef122da461cbb26199c236b6d09","711140217c5b487db9bd3d0f70ffd5dd","ae8b66f993804200bc802ae72a6ff8e9","f576212311c94fffa464747883fa789b"]},"id":"xU3YJR9GHkz7","executionInfo":{"status":"ok","timestamp":1762100197424,"user_tz":-330,"elapsed":17917,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"f5f27747-4d8e-4255-99b2-db45d4c5d33a"},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:00"],"text/html":["
"]},"metadata":{}},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":4}],"source":["print(\"๐ŸŽ‰ Launching your Basic RAG Assistant...\")\n","print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n","print(\"\")\n","print(\"๐Ÿ“‹ Testing Instructions:\")\n","print(\"1. Click 'Initialize Database' button first\")\n","print(\"2. Wait for success message\")\n","print(\"3. Enter a question in the query box\")\n","print(\"4. Click 'Ask Question' to get AI response\")\n","print(\"\")\n","print(\"๐Ÿ’ก Example questions to try:\")\n","print(\"- What are the main topics in the documents?\")\n","print(\"- Summarize the key findings\")\n","print(\"- Explain the methodology used\")\n","print(\"\")\n","print(\"๐Ÿš€ Launch your app:\")\n","\n","# Your launch code here:\n","# Uncomment when implemented\n","basic_interface.launch(share=True)"]},{"cell_type":"markdown","metadata":{"id":"v51KJpIHHkz9"},"source":["## โœ… Assignment Completion Checklist\n","\n","Before submitting, ensure you have:\n","\n","- [x] RAG backend is provided and working\n","- [ ] Created Gradio interface with required components:\n"," - [ ] Title and description using gr.Markdown()\n"," - [ ] Initialize database button using gr.Button()\n"," - [ ] Status output using gr.Textbox()\n"," - [ ] Query input field using gr.Textbox()\n"," - [ ] Submit query button using gr.Button()\n"," - [ ] Response output area using gr.Textbox()\n","- [ ] Connected buttons to backend functions using .click()\n","- [ ] Successfully launched the application\n","- [ ] Tested the full workflow (initialize โ†’ query โ†’ response)\n","\n","## ๐ŸŽŠ Congratulations!\n","\n","You've successfully built your first Gradio RAG application! 
You now have:\n","\n","- A functional web interface for your RAG system\n","- Understanding of Gradio basics and component connections\n","- A foundation for building more complex AI applications\n","\n","**Next Steps**: Complete Assignment 3b to add advanced configuration options to your RAG interface!\n"]}],"metadata":{"kernelspec":{"display_name":"accelerator","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"colab":{"provenance":[]},"widgets":{"application/vnd.jupyter.widget-state+json":{"29c8526118734cdfb4e39787887b3c8d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_8adf73dc0ed649a5a8c5a5d9025e48ff","IPY_MODEL_2fe46a51fb8144f1bda9f7a291088063","IPY_MODEL_ac01b9c0a1f642feb84b714053ba82fd"],"layout":"IPY_MODEL_5380a2c612934f608b72ab08f38606d3"}},"8adf73dc0ed649a5a8c5a5d9025e48ff":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f848556fe0254588a75c886a9d8f2eea","placeholder":"โ€‹","style":"IPY_MODEL_4d2cd19e968d42a5aeeca9e450d1bd8b","value":"modules.json:โ€‡100%"}},"2fe46a51fb8144f1bda9f7a291088063":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","mode
l_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_64ac224325924d94b411ee2848bab330","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_d4f769a917124dc4b807dc9175c986e7","value":349}},"ac01b9c0a1f642feb84b714053ba82fd":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4b350e017a5d4695905ee915f7d28e51","placeholder":"โ€‹","style":"IPY_MODEL_7dd16942c28c4f14808cdbcf859c21f2","value":"โ€‡349/349โ€‡[00:00<00:00,โ€‡23.1kB/s]"}},"5380a2c612934f608b72ab08f38606d3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"o
bject_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f848556fe0254588a75c886a9d8f2eea":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4d2cd19e968d42a5aeeca9e450d1bd8b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"64ac224325924d94b411ee2848bab330":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":nul
l,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d4f769a917124dc4b807dc9175c986e7":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"4b350e017a5d4695905ee915f7d28e51":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overfl
ow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7dd16942c28c4f14808cdbcf859c21f2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"23a2f365cc6f4f8ebc5fdacdf77b7005":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_294103a64b724f1bb2d040ed39e741e2","IPY_MODEL_ab3a38c9dd974da787b6c5ef7ed47b0b","IPY_MODEL_fbf20aad929a4055b71784bbbe47e48e"],"layout":"IPY_MODEL_54c77334589a4ca28c74dd164fa4c811"}},"294103a64b724f1bb2d040ed39e741e2":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_46cee2f68aba4304af164cfd8f03a27b","placeholder":"โ€‹","style":"IPY_MODEL_46ac5f7f07f54b9cac4df267afdea68d","value":"config_sentence_transformers.json:โ€‡100%"}},"ab3a38c9dd974da787b6c5ef7ed47b0b":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_
view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_c6af8202f2ec44c88bdf9b7509636ad2","max":124,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b87a4b50638c487abf04b62651fc1ecf","value":124}},"fbf20aad929a4055b71784bbbe47e48e":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_af461b7a4bca42a4bc803234b85adc38","placeholder":"โ€‹","style":"IPY_MODEL_cdee1e8d4ae645cdacd7844a35129b29","value":"โ€‡124/124โ€‡[00:00<00:00,โ€‡5.79kB/s]"}},"54c77334589a4ca28c74dd164fa4c811":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"46cee2f
68aba4304af164cfd8f03a27b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"46ac5f7f07f54b9cac4df267afdea68d":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c6af8202f2ec44c88bdf9b7509636ad2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto
_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b87a4b50638c487abf04b62651fc1ecf":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"af461b7a4bca42a4bc803234b85adc38":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cdee1e8d4ae645cdacd7844a35129b29":{"model_module":"@jupyter-widg
ets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"56efd96439544945bcfca3e79cbb1a29":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_2afb8c115dfd492aa81ea3208fc6371b","IPY_MODEL_3fca0ca9cbd7429f91aca4288f4c232b","IPY_MODEL_af6cb5768d1244f0861609db8181e1e0"],"layout":"IPY_MODEL_db890f449baf471fa1730e0f49a65894"}},"2afb8c115dfd492aa81ea3208fc6371b":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e7b24e6d82b84fe3a4ed97aab23487af","placeholder":"โ€‹","style":"IPY_MODEL_88ba09d1e813487598ff3fe92a87b672","value":"README.md:โ€‡"}},"3fca0ca9cbd7429f91aca4288f4c232b":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":
"IPY_MODEL_5f3109bfe82948e9a85cf51c49d93846","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_f78db77bab864007b6f85443efec8bbb","value":1}},"af6cb5768d1244f0861609db8181e1e0":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_36e4e1bd96f042e5ad186b304f1d716c","placeholder":"โ€‹","style":"IPY_MODEL_11ab2513be86450d8f17eff3fcac390f","value":"โ€‡94.8k/?โ€‡[00:00<00:00,โ€‡5.95MB/s]"}},"db890f449baf471fa1730e0f49a65894":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e7b24e6d82b84fe3a4ed97aab23487af":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":
"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"88ba09d1e813487598ff3fe92a87b672":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5f3109bfe82948e9a85cf51c49d93846":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":
null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"f78db77bab864007b6f85443efec8bbb":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"36e4e1bd96f042e5ad186b304f1d716c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"11ab2513be86450d8f17eff3fcac390f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionSty
leModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"80f64e3d3ea14fee938443de745a4cc2":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_464a7aa64455427a8b241987d40575f2","IPY_MODEL_ca08abbe9fde41d9871b5ab91535ba88","IPY_MODEL_50ad34e4863d4f3ba07f18185521f42c"],"layout":"IPY_MODEL_6f3b172cb35e447f961bd0978442bf0e"}},"464a7aa64455427a8b241987d40575f2":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3babe7706f5949afbfa3757b0bfbbaf4","placeholder":"โ€‹","style":"IPY_MODEL_25526400e1604aada1f5e6bda33a9089","value":"sentence_bert_config.json:โ€‡100%"}},"ca08abbe9fde41d9871b5ab91535ba88":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_c471b58ef4504e248e056fbe4a262281","max":52,"min":0,"orientation":"horizontal","style":"IPY_MODEL_dee29c448abb430e925c366228d74bed","value":52}},"50ad34e4863d4f3ba07f
18185521f42c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2de1b5901612494f87b0ca0fed9b66d3","placeholder":"โ€‹","style":"IPY_MODEL_88dbdbdea2354247ad95e67a1090a190","value":"โ€‡52.0/52.0โ€‡[00:00<00:00,โ€‡5.00kB/s]"}},"6f3b172cb35e447f961bd0978442bf0e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3babe7706f5949afbfa3757b0bfbbaf4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,
"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"25526400e1604aada1f5e6bda33a9089":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c471b58ef4504e248e056fbe4a262281":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"
overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dee29c448abb430e925c366228d74bed":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2de1b5901612494f87b0ca0fed9b66d3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"88dbdbdea2354247ad95e67a1090a190":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"594df45c0a094b68a268b0d54
12af94d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_e6986b93f60f421e94dd37ab830e0de3","IPY_MODEL_3bac9dfe41604d0583bdca6f932033b8","IPY_MODEL_30214e34272040679dc5fa15f720064f"],"layout":"IPY_MODEL_0229d2ab797a4dffac04e96d6b08e699"}},"e6986b93f60f421e94dd37ab830e0de3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_456146dfda614fdf900fa253411f2643","placeholder":"โ€‹","style":"IPY_MODEL_b26b541ab9d3415cb742df152837a21a","value":"config.json:โ€‡100%"}},"3bac9dfe41604d0583bdca6f932033b8":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_7c4f7c59205741c09bb0c45a2ee079fe","max":743,"min":0,"orientation":"horizontal","style":"IPY_MODEL_5d4b6948dbbe487393d04ec5a966fbec","value":743}},"30214e34272040679dc5fa15f720064f":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","
_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bbb4be2fa3a04ce99b1e93d5c447d7d6","placeholder":"โ€‹","style":"IPY_MODEL_0025ea31940e419b828376df6b1cc37b","value":"โ€‡743/743โ€‡[00:00<00:00,โ€‡71.6kB/s]"}},"0229d2ab797a4dffac04e96d6b08e699":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"456146dfda614fdf900fa253411f2643":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows
":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b26b541ab9d3415cb742df152837a21a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"7c4f7c59205741c09bb0c45a2ee079fe":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5d4b6948dbbe487393d04ec5a966fbec":{"model_module":"@jupyter-widgets/controls","m
odel_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"bbb4be2fa3a04ce99b1e93d5c447d7d6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0025ea31940e419b828376df6b1cc37b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"095b448393d44d8a87995166e71f5f95":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_
module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_63899673860b4061886ae8d7b5a03713","IPY_MODEL_3cda42948f734557b95e86700cc19270","IPY_MODEL_3e5402451c724a5a82754985a18754ce"],"layout":"IPY_MODEL_9a1d3ffc42094c04b9ffa54b350c1804"}},"63899673860b4061886ae8d7b5a03713":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_31f349efe39d4d9ca69fb36ddc712d3f","placeholder":"โ€‹","style":"IPY_MODEL_b7f1eed940a545c09e6c496e2aae44d2","value":"model.safetensors:โ€‡100%"}},"3cda42948f734557b95e86700cc19270":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_38c8fb204aa4425d8e4a51258ae164ca","max":133466304,"min":0,"orientation":"horizontal","style":"IPY_MODEL_36b571f73d0e41f888f9e2ac882b6675","value":133466304}},"3e5402451c724a5a82754985a18754ce":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLVi
ew","description":"","description_tooltip":null,"layout":"IPY_MODEL_5cd90753e88945db85ebd4047c5f1089","placeholder":"โ€‹","style":"IPY_MODEL_7ab7fb0dabd2431b8da42f29112ad550","value":"โ€‡133M/133Mโ€‡[00:02<00:00,โ€‡107MB/s]"}},"9a1d3ffc42094c04b9ffa54b350c1804":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"31f349efe39d4d9ca69fb36ddc712d3f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_conten
t":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b7f1eed940a545c09e6c496e2aae44d2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"38c8fb204aa4425d8e4a51258ae164ca":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"36b571f73d0e41f888f9e2ac882b6675":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"Progre
ssStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5cd90753e88945db85ebd4047c5f1089":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7ab7fb0dabd2431b8da42f29112ad550":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"73fed5108a93410aa40e387264255160":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","b
ox_style":"","children":["IPY_MODEL_da14e21da45546abb098e700e5e60a33","IPY_MODEL_c8f9b4a9134c47929021528fc8f64f71","IPY_MODEL_0c5f353360e04cc6af1f2487fc30b3ff"],"layout":"IPY_MODEL_34aa9d04461b4e919d07830d23f9fda1"}},"da14e21da45546abb098e700e5e60a33":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_651dcd57e8aa45fa89268841297ae6a3","placeholder":"โ€‹","style":"IPY_MODEL_526bf032890a4256aefc8d50cc7d8371","value":"tokenizer_config.json:โ€‡100%"}},"c8f9b4a9134c47929021528fc8f64f71":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_bdb8cfc7dfc242799a40586df8b91d6e","max":366,"min":0,"orientation":"horizontal","style":"IPY_MODEL_82b23de6ea3f46cd984e90b3ab9f3eec","value":366}},"0c5f353360e04cc6af1f2487fc30b3ff":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_769d61e92083433aac21e9f4428a30e3","placeholder":"โ€‹","style":"IPY_MODEL_fdfdbff8f89f49b387ddb0e4061afddf","v
alue":"โ€‡366/366โ€‡[00:00<00:00,โ€‡27.0kB/s]"}},"34aa9d04461b4e919d07830d23f9fda1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"651dcd57e8aa45fa89268841297ae6a3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null
,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"526bf032890a4256aefc8d50cc7d8371":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bdb8cfc7dfc242799a40586df8b91d6e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"82b23de6ea3f46cd984e90b3ab9f3eec":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"769d61e
92083433aac21e9f4428a30e3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fdfdbff8f89f49b387ddb0e4061afddf":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dbb0169d244247b59d4f972e34f07d57":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_007c89344ae949d9b6aafc8c14bcfe88","IPY_MODEL_d85e273c60b649f89d1a34cc8c6b8933","IPY_MODEL_0b6ef9ec809f40f8a9ba79a0f7e13dfc"],"layout":"IPY_MOD
EL_3e53ee3f24d64bc7a524ce7e78c29f17"}},"007c89344ae949d9b6aafc8c14bcfe88":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_52389e6daed54e5d854b20217b53e62d","placeholder":"โ€‹","style":"IPY_MODEL_82d09953a81741b3b6f799ce907f7739","value":"vocab.txt:โ€‡"}},"d85e273c60b649f89d1a34cc8c6b8933":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_a6759f8c292d4927b424ff16987d120e","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_ed6c2f99446c49eba1b026cb1dcdd991","value":1}},"0b6ef9ec809f40f8a9ba79a0f7e13dfc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_fdae79be31cc471ca7d2a3d830b937fe","placeholder":"โ€‹","style":"IPY_MODEL_30494ca740bb48708a8a5d8f46960cee","value":"โ€‡232k/?โ€‡[00:00<00:00,โ€‡5.61MB/s]"}},"3e53ee3f24d64bc7a524ce7e78c29f17":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_
module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"52389e6daed54e5d854b20217b53e62d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"82d09953a81741b3b6f799ce907f7739":{"model_module":"@jupyter-widgets/contr
ols","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a6759f8c292d4927b424ff16987d120e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"ed6c2f99446c49eba1b026cb1dcdd991":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"fdae79be31cc471ca7d2a3d830b937fe":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version"
:"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"30494ca740bb48708a8a5d8f46960cee":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c94a678808f649e09449d5e0c1610998":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_243c57bc25b34a3c93c038995dabb6c8","IPY_MODEL_d35a8e85ebc24775923dcb6fc8bf3b94","IPY_MODEL_6f7bbd2e653a40eb82fb35a9465d8aa9"],"layout":"IPY_MODEL_98461d6a77d5473d94da2a21976125f2"}},"243c57bc25b34a3c93c038995dabb6c8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes"
:[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_87b2133871ef4e7cab1bde01b0a0f638","placeholder":"โ€‹","style":"IPY_MODEL_6067c0a455e74365ac64b9d6d197b415","value":"tokenizer.json:โ€‡"}},"d35a8e85ebc24775923dcb6fc8bf3b94":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_299b4358ad4c4dff91b3714c90ee2a2b","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_235e44623aac4ca684a05cf615e09e7f","value":1}},"6f7bbd2e653a40eb82fb35a9465d8aa9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0060126456ba450bb5994823c19a8fe0","placeholder":"โ€‹","style":"IPY_MODEL_dee0a0429ea140d2b94c01efffe6834b","value":"โ€‡711k/?โ€‡[00:00<00:00,โ€‡10.1MB/s]"}},"98461d6a77d5473d94da2a21976125f2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name
":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"87b2133871ef4e7cab1bde01b0a0f638":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6067c0a455e74365ac64b9d6d197b415":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleMod
el","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"299b4358ad4c4dff91b3714c90ee2a2b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"235e44623aac4ca684a05cf615e09e7f":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"0060126456ba450bb5994823c19a8fe0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,
"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dee0a0429ea140d2b94c01efffe6834b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f1c8234481fe42e488d6d94fc0bc04b3":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a50910a2209c462bbdbd8c6879221290","IPY_MODEL_ec48f5c6933444cdad1b8acf78f4ecdf","IPY_MODEL_1475e0bfd0e448cab6d9e5946baf87db"],"layout":"IPY_MODEL_ae00ff78797446818d0a01ebf2827171"}},"a50910a2209c462bbdbd8c6879221290":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"
1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_90a843cf4afb4fc5a0e393ece36cac50","placeholder":"โ€‹","style":"IPY_MODEL_deb3da53beb5450f8369e6780aeeb918","value":"special_tokens_map.json:โ€‡100%"}},"ec48f5c6933444cdad1b8acf78f4ecdf":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_6b0b8031053a4d669fbc4d5d40b228e3","max":125,"min":0,"orientation":"horizontal","style":"IPY_MODEL_45dcadbbcc014b7398c33f65f51ed55a","value":125}},"1475e0bfd0e448cab6d9e5946baf87db":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1f0817d471364e16aa312e55ff02e05b","placeholder":"โ€‹","style":"IPY_MODEL_211a9a84c96e431d8e3e28cc91036e93","value":"โ€‡125/125โ€‡[00:00<00:00,โ€‡8.50kB/s]"}},"ae00ff78797446818d0a01ebf2827171":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_c
olumns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"90a843cf4afb4fc5a0e393ece36cac50":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"deb3da53beb5450f8369e6780aeeb918":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6b0b8031053a4d669fbc4d5d40b228
e3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"45dcadbbcc014b7398c33f65f51ed55a":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"1f0817d471364e16aa312e55ff02e05b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,
"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"211a9a84c96e431d8e3e28cc91036e93":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c4fc0f1e4e9b4b239867d23d2b4bb700":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_7051d3fd92b5498597da75e856b6ccf0","IPY_MODEL_ebfa88ffe54f41fc8ca89f5f65664a91","IPY_MODEL_3e92ee759cd74c4a8d97e5ef5c0de5aa"],"layout":"IPY_MODEL_14b8e6e1560d445e9634b103b408001e"}},"7051d3fd92b5498597da75e856b6ccf0":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8cc241c20c8e49c69f97b2b30ea07192","placeholder":"โ€‹","style":"IPY_MODEL_aaf21bb0
f9a047869f2e4c409e507f56","value":"config.json:โ€‡100%"}},"ebfa88ffe54f41fc8ca89f5f65664a91":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_c88f0ef122da461cbb26199c236b6d09","max":190,"min":0,"orientation":"horizontal","style":"IPY_MODEL_711140217c5b487db9bd3d0f70ffd5dd","value":190}},"3e92ee759cd74c4a8d97e5ef5c0de5aa":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ae8b66f993804200bc802ae72a6ff8e9","placeholder":"โ€‹","style":"IPY_MODEL_f576212311c94fffa464747883fa789b","value":"โ€‡190/190โ€‡[00:00<00:00,โ€‡7.69kB/s]"}},"14b8e6e1560d445e9634b103b408001e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":nul
l,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8cc241c20c8e49c69f97b2b30ea07192":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"aaf21bb0f9a047869f2e4c409e507f56":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c88f0ef122da461cbb26199c236b6d09":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_mod
el_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"711140217c5b487db9bd3d0f70ffd5dd":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ae8b66f993804200bc802ae72a6ff8e9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":nu
ll,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f576212311c94fffa464747883fa789b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_3b_advanced_gradio_rag.ipynb b/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_3b_advanced_gradio_rag.ipynb deleted file mode 100644 index 9ca3fec..0000000 --- a/Monalisa_Samal/Day_6_Completed_4_assignments/assignment_3b_advanced_gradio_rag.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"pKDowy-WZFvi"},"source":["# Assignment 3b: Advanced Gradio RAG Frontend\n","## Day 6 Session 2 - Building Configurable RAG Applications\n","\n","In this assignment, you'll extend your basic RAG interface with advanced configuration options to create a professional, feature-rich RAG application.\n","\n","**New Features to Add:**\n","- Model selection dropdown (gpt-4o, gpt-4o-mini)\n","- Temperature slider (0 to 1 with 0.1 intervals)\n","- Chunk size configuration\n","- Chunk overlap configuration \n","- Similarity top-k slider\n","- Node postprocessor multiselect\n","- Similarity cutoff slider\n","- Response synthesizer multiselect\n","\n","**Learning Objectives:**\n","- Advanced Gradio components and interactions\n","- Dynamic RAG configuration\n","- Professional UI design patterns\n","- Parameter validation and handling\n","- 
Building production-ready AI applications\n","\n","**Prerequisites:**\n","- Completed Assignment 3a (Basic Gradio RAG)\n","- Understanding of RAG parameters and their effects\n"]},{"cell_type":"markdown","metadata":{"id":"EUgCpBomZFvl"},"source":["## ๐Ÿ“š Part 1: Setup and Imports\n","\n","Import all necessary libraries including advanced RAG components for configuration options.\n","\n","**Note:** This assignment uses OpenRouter for LLM access (not OpenAI). Make sure you have your `OPENROUTER_API_KEY` environment variable set.\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"9JfGHoNeZKs9","executionInfo":{"status":"ok","timestamp":1762100857948,"user_tz":-330,"elapsed":22653,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"3680aa8b-e060-43a3-f8c7-0f948706d9b8"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["# If it's in a specific folder (e.g., \"Projects/MyProject/\")\n","!pip install -r '/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt'"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"cpEwIq29ZWU4","executionInfo":{"status":"ok","timestamp":1762100923592,"user_tz":-330,"elapsed":46970,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"c810f687-58d5-42d0-adb8-9808b40148cc"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 5)) (0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in 
/usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m17.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m13.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) 
(5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from 
gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) 
(0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in 
/usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting 
lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from 
llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: 
jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from 
httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from 
llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from 
llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) 
(0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in 
/usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in 
/usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading 
llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata 
(3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 
12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m25.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl 
(4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m83.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m61.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m80.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m16.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m21.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m87.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m26.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m17.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m3.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m13.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading 
colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=df6b8a2865a9c656c885fb982e01566928267089cd15a8f088b91321fb35968a\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 
llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"c44b2f91af8043c3ae103c955ddb8c15"}},"metadata":{}}]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"tS_6DiHHZFvm","executionInfo":{"status":"ok","timestamp":1762101233293,"user_tz":-330,"elapsed":51,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"632faa21-77bc-42a9-ccfe-0a0377784a00"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… All libraries imported successfully!\n"]}],"source":["# Import all required libraries\n","import gradio as gr\n","import os\n","from pathlib import Path\n","from typing import Dict, List, Optional, Any\n","\n","# LlamaIndex core components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","# Advanced RAG components\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n","from 
llama_index.core.retrievers import VectorIndexRetriever\n","\n","print(\"โœ… All libraries imported successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"r87HNpLgZFvp"},"source":["## ๐Ÿค– Part 2: Advanced RAG Backend Class\n","\n","Create an advanced RAG backend that supports dynamic configuration of all parameters.\n"]},{"cell_type":"code","execution_count":3,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":386,"referenced_widgets":["e555075b3e35411da8a8fa2a151d071c","cacc05334346456fba8750702fa72804","1e0b3679aacd4fee8f374db0a9c80c6e","6895b036178344a8b4288bc9622beee9","76146673913a4dfc8058c1ba15b5dc0a","afe9e823311d4f8f98fe74c09b4bc65c","52328b934aa64d0ea8f48d5ee08c4d6f","2581605a93414b8fad1c6917c9e62041","bbf6c319eb91480fa1dd9a735b62117d","5c27b0f135394065919c1aa99328bb74","e7f0690ca1c94934976ec486dc37072f","5da85a1d4aec45f4a7d438187785f2e7","df63701c3c9540278f9e1f9ec72a05cf","007f597dd2e74066b2749182d3e77cbd","13231d4eb7b24594997fdad377e9a39f","0749219f0b4f4d5685b5d1fa2744e5c0","0b6f2f52082f44ecb213ad01fda25649","554fe1d4de3b4140a1bc8bc73f32aad5","9c0700eb9c1345219ead5383c7a5c7c4","0b2a765d805342d0a2b05c6da90d8db4","e955920cb26a440aad247bd03928a99d","85e9c9e790b04f3fb0babf5d36697b57","826e3e3fdd7c44f8ae069ac8cb179b57","88d753427218445e8970c92618c9e8ac","b9df30b27c4d47dca2b18340de6f3b56","3be7ef3867e54a7d98fc7b97de945d35","b728eeea9bbb4d6f8420486e76a27ed8","2d4f4bda9d1741b7a1fd6949a3570e3c","5d7ed8c9b4bc4129baec1ce03527bd10","4c3ad25ebbf54603b5d3e9313c742ac5","2ce5e030c5824644bd20b0f997faf3ff","2bdbf39069564c43a84eb87813cae3ab","df80b039fd1f4771a0e71bfafc219033","b5451d4ea98245509c782b8fe1d25cfc","efb15bc3d2524078ae47ed2b17099e2c","ce3a6a42427b47ddb8dec0b86b803e9d","e4e9564b34ff49eba064a27481b5d1a7","f1d99a3490034b77a72732c83785064d","49f0e8f020a2492ba68e874f4c00fe67","04a5bde86d3b4059a1e1f8a55199c7d8","366da23ff9c2454daddcb04eb9f416c0","b6b286c77de44f869f22e1fc3773005c","b58a7568a3f94b56876e77ed3576fe76","6d82cf3cdf9e4779975a7849593c
5e3a","c218eae8906949508dc7fdd629c9e6c3","b6b8c9d1dba7435582d156aee18505a4","9bd3f34817134982bdee81d46a839ae5","5bbc689e0eb3486595f41c1aed1351ea","03a5f56c249448cb8a9c00963576195c","1a5c47d3aef547308d8cf4178b3d277d","c4ba4f2a1daf4233a94e64462601d006","cc94e340844d461caac8491af4171a0f","6b179591713b436c9627d102187dd0e2","ec3fd1520af74520a7a6a5a00fc78035","f4f62182f094460baf830448f3d016a5","bd76ef35f6034a0faa3eafdd92268b65","1b909e7746564a06af03d038b1f9a028","7bfbc726c13343c9af40bc4843cf6e88","a014631e0033471a8281112845e82e77","1bbae592d24d4af9b635c6c01a783b71","7f6d51e4097347219ee26cfc8bfc7487","8bf2267284124019b5db59f5b74ec0fb","fc3b98227c2340519a308e83456d0548","3be865e0fbe1479d950f208ba22c697c","59ba8c325ccb45f9b61b973f19926d33","7aad31231ba54d6183b865df565d2f15","022a6a5527534d6492f1853944362cab","dd05f368aaae4881beda9caf48beb67d","5c8b5945e01743719b75550e8ae2234d","cb37c858fa744c64ab659ff78e3a9fb7","5e108b1d57e24a48938cd4fdc4e2f90d","7575469fb0a34897bffa23cf6e1959ee","ddce348992f1480cacede7855bd69e0b","ccf49349f1b54a45986d54df8cdf06a6","a4e3d8eaae084d41a49aece4ce89cb31","ad7b2c8da94a4125adcae2d18ff632b6","5f02b68046014a6b8482a3814c5ee44a","bc86fc01faf04f0b8e8982dde6aebfee","22ed11bcf43c41f2a6813eae2c5fbfb5","2f6e11fed22842b1a57153781b773758","fcb7052b94fc47f1b7734de1568e17f0","42ad5a129ec844eebe6009fbaeb8c475","61cda27f064b484aa9446e74cef059d2","fc94e5ee2e5e40a7abfc8595fa1b8bba","9ced3f5dc0c341e39f156a51a6de3162","4283b4daac544179ba8d2ac59a6a890a","10f5e0c42d864e2282a393cf169df9b8","2df9ec0f953445d397128e084c33c738","01e6ba1052dc4b939e2116563f3b2df5","9bb7052ed725400ab5a0a050a3f62e51","5bec150283b340ee8c30834c5a1fbaa5","ed69cd4fb29744719b6291e34cdcc8ef","cf994cff745c40b986c387f5bdf1a429","dee358c514304f40a092cfddee6cd4ff","0696f75acf1f4014a565de28ae7c8462","d7347f2c6fa2425588106b7e2a9699f0","b2d78955677045ef8b43f934aaabfd97","61356446b8174f93b6c045e7c0741b0b","005afce015354a6fa0dacf9a01bba361","a999fe81cf984effb2f621da0b338823","27e0a306b76d4ec4a50290ccfdd7755a"
,"539f802d01ef45ecb3c750616d36f3e8","057558df75e947a8af2e5a4538d3c26c","ae265553fe7d4b33b131c6d46ce02f44","00dd28179d6a41d8812aafda5135507c","5cb1be0cd24141f1a32e86b9ea09ec86","282fa42f92e84c4c9a2a886d5d85df4a","54382bef09384ff29564c3d7b6c5ea89","e6d8e6a9d954452bb8adb8d3d101939d","5df80152d588453ea46ae6c73eac0a19","105d74d437fd4d19a2beab025055b969","58679b3e995b413298ccceba14c5cee5","702bc5c5d42a48faa14ee2a661471b2c","827b227da03a4f39b4eebf2ebca7f147","3b03d89f230747c799418596fdd038cb","c6e4f05eceea4daa9f9ed046faa160f6","42e89191795b4c82b3b3970f638950ae","3108cef227b24e0499f26f823e89d1f3","f883d8d9f738476887c97c94dcf184df","999f8bcc0b1d4083922105f549167f1b","8a1597c0aaa84bb29042275aae7de6f2"]},"id":"J9fb_kzHZFvp","executionInfo":{"status":"ok","timestamp":1762101939002,"user_tz":-330,"elapsed":8584,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"d5f1fbab-755d-4857-fbe8-70361f0a1a14"},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:00 Dict[str, Any]:\n"," \"\"\"Query the RAG system with advanced configuration.\"\"\"\n","\n"," # Check if index exists\n"," if self.index is None:\n"," return {\"response\": \"โŒ Please initialize the database first!\", \"sources\": [], \"config\": {}}\n","\n"," # Check if question is empty\n"," if not question or not question.strip():\n"," return {\"response\": \"โš ๏ธ Please enter a question first!\", \"sources\": [], \"config\": {}}\n","\n"," try:\n"," # Update settings with new parameters\n"," self.update_settings(model, temperature, chunk_size, chunk_overlap)\n","\n"," # Get postprocessors\n"," postprocessors = []\n"," for name in postprocessor_names:\n"," processor = self.get_postprocessor(name, similarity_cutoff)\n"," if processor is not None:\n"," postprocessors.append(processor)\n","\n"," # Get synthesizer\n"," synthesizer = self.get_synthesizer(synthesizer_name)\n","\n"," # Create query engine with all parameters\n"," query_engine_kwargs = 
{\"similarity_top_k\": similarity_top_k}\n"," if postprocessors:\n"," query_engine_kwargs[\"node_postprocessors\"] = postprocessors\n"," if synthesizer is not None:\n"," query_engine_kwargs[\"response_synthesizer\"] = synthesizer\n","\n"," query_engine = self.index.as_query_engine(**query_engine_kwargs)\n","\n"," # Query and get response\n"," response = query_engine.query(question)\n","\n"," # Extract source information if available\n"," sources = []\n"," if hasattr(response, 'source_nodes'):\n"," for node in response.source_nodes:\n"," sources.append({\n"," \"text\": node.text[:200] + \"...\",\n"," \"score\": getattr(node, 'score', 0.0),\n"," \"source\": getattr(node.node, 'metadata', {}).get('file_name', 'Unknown')\n"," })\n","\n"," return {\n"," \"response\": str(response),\n"," \"sources\": sources,\n"," \"config\": {\n"," \"model\": model,\n"," \"temperature\": temperature,\n"," \"chunk_size\": chunk_size,\n"," \"chunk_overlap\": chunk_overlap,\n"," \"similarity_top_k\": similarity_top_k,\n"," \"postprocessors\": postprocessor_names,\n"," \"similarity_cutoff\": similarity_cutoff,\n"," \"synthesizer\": synthesizer_name\n"," }\n"," }\n","\n"," except Exception as e:\n"," return {\"response\": f\"โŒ Error processing query: {str(e)}\", \"sources\": [], \"config\": {}}\n","\n","# Initialize the backend\n","rag_backend = AdvancedRAGBackend()\n","print(\"๐Ÿš€ Advanced RAG Backend initialized and ready!\")\n"]},{"cell_type":"markdown","metadata":{"id":"BBh6sV0PZFvs"},"source":["## ๐ŸŽจ Part 3: Advanced Gradio Interface\n","\n","Create a sophisticated Gradio interface with all the configuration options specified:\n","1. Database initialization button\n","2. Search query input and button \n","3. Model selection dropdown\n","4. Temperature slider\n","5. Chunk size and overlap inputs\n","6. Similarity top-k slider\n","7. Node postprocessor multiselect\n","8. Similarity cutoff slider\n","9. 
Response synthesizer multiselect\n"]},{"cell_type":"code","execution_count":4,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"KFi62HgfZFvs","executionInfo":{"status":"ok","timestamp":1762102261840,"user_tz":-330,"elapsed":282,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"ef9f12ed-eb6e-4caf-d2e0-638d338cfc9e"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Advanced RAG interface created successfully!\n"]}],"source":["def create_advanced_rag_interface():\n"," \"\"\"Create advanced RAG interface with full configuration options.\"\"\"\n","\n"," def initialize_db():\n"," \"\"\"Handle database initialization.\"\"\"\n"," return rag_backend.initialize_database()\n","\n"," def handle_advanced_query(question, model, temperature, chunk_size, chunk_overlap,\n"," similarity_top_k, postprocessors, similarity_cutoff, synthesizer):\n"," \"\"\"Handle advanced RAG queries with all configuration options.\"\"\"\n"," result = rag_backend.advanced_query(\n"," question, model, temperature, chunk_size, chunk_overlap,\n"," similarity_top_k, postprocessors, similarity_cutoff, synthesizer\n"," )\n","\n"," # Format configuration for display\n"," config_text = f\"\"\"**Current Configuration:**\n","- Model: {result['config'].get('model', 'N/A')}\n","- Temperature: {result['config'].get('temperature', 'N/A')}\n","- Chunk Size: {result['config'].get('chunk_size', 'N/A')}\n","- Chunk Overlap: {result['config'].get('chunk_overlap', 'N/A')}\n","- Similarity Top-K: {result['config'].get('similarity_top_k', 'N/A')}\n","- Postprocessors: {', '.join(result['config'].get('postprocessors', []))}\n","- Similarity Cutoff: {result['config'].get('similarity_cutoff', 'N/A')}\n","- Synthesizer: {result['config'].get('synthesizer', 'N/A')}\"\"\"\n","\n"," return result[\"response\"], config_text\n","\n"," # TODO: Create the advanced interface structure\n"," # Hint: This interface needs more complex layout with configuration 
controls\n","\n"," with gr.Blocks(title=\"Advanced RAG Assistant\") as interface:\n"," # TODO: Add title and description\n"," # Hint: Use gr.Markdown() for formatted text\n","\n"," # Your title and description here:\n"," gr.Markdown(\"# ๐Ÿค– Advanced RAG Assistant\")\n"," gr.Markdown(\"Configure all RAG parameters for optimal performance and experiment with different settings!\")\n","\n","\n"," # TODO: Add database initialization section\n"," # Hint: Use gr.Button() for initialization and gr.Textbox() for status\n"," init_btn = gr.Button(\"๐Ÿ”„ Initialize Vector Database\", variant=\"primary\")\n"," status_output = gr.Textbox(label=\"Database Status\", lines=2, interactive=False)\n","\n"," # status_output = ?\n","\n","\n"," # TODO: Create main layout with columns\n"," # Hint: Configuration controls on left, query/response on right makes sense\n"," # Use gr.Row() and gr.Column() to organize this\n","\n"," with gr.Row():\n"," with gr.Column(scale=1):\n","\n"," gr.Markdown(\"### โš™๏ธ RAG Configuration\")\n","\n"," # TODO: Model selection\n"," # Hint: Use gr.Dropdown() with choices=[\"gpt-4o\", \"gpt-4o-mini\"]\n"," model_dropdown = gr.Dropdown(\n"," choices=[\"gpt-4o\", \"gpt-4o-mini\"],\n"," value=\"gpt-4o-mini\",\n"," label=\"LLM Model\"\n"," )\n","\n","\n","\n"," # TODO: Temperature control\n"," # Hint: Use gr.Slider() with minimum=0.0, maximum=1.0, step=0.1, value=0.1\n"," temperature_slider = gr.Slider(\n"," minimum=0.0, maximum=1.0, step=0.1, value=0.1,\n"," label=\"Temperature (0=deterministic, 1=creative)\"\n"," )\n","\n","\n","\n"," # TODO: Chunking parameters\n"," # Hint: Use gr.Number() for numeric inputs with default values\n"," chunk_size_input = gr.Number(\n"," value=512, minimum=128, maximum=2048,\n"," label=\"Chunk Size\"\n"," )\n","\n"," chunk_overlap_input = gr.Number(\n"," value=50, minimum=0, maximum=200,\n"," label=\"Chunk Overlap\"\n"," )\n","\n","\n"," # TODO: Retrieval parameters\n"," # Hint: Use gr.Slider() with minimum=1, maximum=20, 
step=1, value=5\n"," similarity_topk_slider = gr.Slider(\n"," minimum=1, maximum=20, step=1, value=5,\n"," label=\"Similarity Top-K (documents to retrieve)\"\n"," )\n","\n","\n"," # TODO: Postprocessor selection\n"," # Hint: Use gr.CheckboxGroup() with choices=[\"SimilarityPostprocessor\"]\n"," postprocessor_checkbox = gr.CheckboxGroup(\n"," choices=[\"SimilarityPostprocessor\"],\n"," value=[\"SimilarityPostprocessor\"],\n"," label=\"Node Postprocessors\"\n"," )\n","\n","\n"," # TODO: Similarity filtering\n"," # Hint: Use gr.Slider() with minimum=0.0, maximum=1.0, step=0.1, value=0.3\n"," similarity_cutoff_slider = gr.Slider(\n"," minimum=0.0, maximum=1.0, step=0.1, value=0.3,\n"," label=\"Similarity Cutoff (0=permissive, 1=strict)\"\n"," )\n","\n","\n"," # TODO: Response synthesizer\n"," # Hint: Use gr.Dropdown() with choices=[\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"]\n"," synthesizer_dropdown = gr.Dropdown(\n"," choices=[\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"],\n"," value=\"TreeSummarize\",\n"," label=\"Response Synthesizer\"\n"," )\n","\n","\n","\n"," with gr.Column(scale=2):\n"," gr.Markdown(\"### ๐Ÿ’ฌ Query Interface\")\n","\n"," # TODO: Query input\n"," # Hint: Use gr.Textbox() with label=\"Ask a question\", placeholder text, lines=3\n"," query_input = gr.Textbox(\n"," label=\"Ask a question\",\n"," placeholder=\"Enter your question about the documents...\",\n"," lines=3\n"," )\n","\n","\n"," # TODO: Submit button\n"," # Hint: Use gr.Button() with variant=\"primary\"\n"," submit_btn = gr.Button(\"๐Ÿš€ Ask Question\", variant=\"primary\")\n","\n","\n"," # TODO: Response output\n"," # Hint: Use gr.Textbox() with lines=12, interactive=False\n"," response_output = gr.Textbox(\n"," label=\"AI Response\",\n"," lines=12,\n"," interactive=False\n"," )\n","\n","\n"," # TODO: Configuration display\n"," # Hint: Use gr.Textbox() with lines=8, interactive=False\n"," config_display = gr.Textbox(\n"," label=\"Configuration 
Used\",\n"," lines=8,\n"," interactive=False\n"," )\n","\n","\n","\n"," # Uncomment to Connect functions to components\n"," init_btn.click(initialize_db, outputs=[status_output])\n","\n"," submit_btn.click(\n"," handle_advanced_query,\n"," inputs=[\n"," query_input, model_dropdown, temperature_slider,\n"," chunk_size_input, chunk_overlap_input, similarity_topk_slider,\n"," postprocessor_checkbox, similarity_cutoff_slider, synthesizer_dropdown\n"," ],\n"," outputs=[response_output, config_display]\n"," )\n","\n","\n"," return interface\n","\n","# Create the interface\n","advanced_interface = create_advanced_rag_interface()\n","print(\"โœ… Advanced RAG interface created successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"W7n-oefpZFvu"},"source":["## ๐Ÿš€ Part 4: Launch Your Advanced Application\n","\n","Launch your advanced Gradio application and test all the configuration options!\n"]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"bLK1trbKZFvu","executionInfo":{"status":"ok","timestamp":1762102278662,"user_tz":-330,"elapsed":972,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"688af962-c7f6-4a1c-ccdf-1b1712c82c9c"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐ŸŽ‰ Launching your Advanced RAG Assistant...\n","๐Ÿ”— Your application will open in a new browser tab!\n","\n","โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\n","\n","๐Ÿ“‹ Testing Instructions:\n","1. Click 'Initialize Vector Database' button first\n","2. Wait for success message\n","3. Configure your RAG parameters:\n"," - Choose model (gpt-4o, gpt-4o-mini)\n"," - Adjust temperature (0.0 = deterministic, 1.0 = creative)\n"," - Set chunk size and overlap\n"," - Choose similarity top-k\n"," - Select postprocessors and synthesizer\n","4. Enter a question and click 'Ask Question'\n","5. 
Review both the response and configuration used\n","\n","๐Ÿงช Experiments to try:\n","- Compare different models with the same question\n","- Test temperature effects (0.1 vs 0.9)\n","- Try different chunk sizes (256 vs 1024)\n","- Compare synthesizers (TreeSummarize vs Refine)\n","- Adjust similarity cutoff to filter results\n","It looks like you are running Gradio on a hosted Jupyter notebook, which requires `share=True`. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n","\n","Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n","* Running on public URL: https://53611b3c5e55b9f39f.gradio.live\n","\n","This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"]},{"output_type":"display_data","data":{"text/plain":[""],"text/html":["
"]},"metadata":{}},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":5}],"source":["print(\"๐ŸŽ‰ Launching your Advanced RAG Assistant...\")\n","print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n","print(\"\")\n","print(\"โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\")\n","print(\"\")\n","print(\"๐Ÿ“‹ Testing Instructions:\")\n","print(\"1. Click 'Initialize Vector Database' button first\")\n","print(\"2. Wait for success message\")\n","print(\"3. Configure your RAG parameters:\")\n","print(\" - Choose model (gpt-4o, gpt-4o-mini)\")\n","print(\" - Adjust temperature (0.0 = deterministic, 1.0 = creative)\")\n","print(\" - Set chunk size and overlap\")\n","print(\" - Choose similarity top-k\")\n","print(\" - Select postprocessors and synthesizer\")\n","print(\"4. Enter a question and click 'Ask Question'\")\n","print(\"5. Review both the response and configuration used\")\n","print(\"\")\n","print(\"๐Ÿงช Experiments to try:\")\n","print(\"- Compare different models with the same question\")\n","print(\"- Test temperature effects (0.1 vs 0.9)\")\n","print(\"- Try different chunk sizes (256 vs 1024)\")\n","print(\"- Compare synthesizers (TreeSummarize vs Refine)\")\n","print(\"- Adjust similarity cutoff to filter results\")\n","\n","# Your code here:\n","advanced_interface.launch()"]},{"cell_type":"markdown","metadata":{"id":"jFejTb9EZFvv"},"source":["## ๐Ÿ’ก Understanding the Configuration Options\n","\n","### Model Selection\n","- **gpt-4o**: Latest and most capable model, best quality responses\n","- **gpt-4o-mini**: Faster and cheaper while maintaining good quality\n","\n","### Temperature (0.0 - 1.0)\n","- **0.0-0.3**: Deterministic, factual responses\n","- **0.4-0.7**: Balanced creativity and accuracy\n","- **0.8-1.0**: More creative and varied responses\n","\n","### Chunk Size & Overlap\n","- **Chunk Size**: How much text to process at once (256-1024 typical)\n","- **Chunk Overlap**: 
Overlap between chunks to maintain context (10-100 typical)\n","\n","### Similarity Top-K (1-20)\n","- **Lower values (3-5)**: More focused, faster responses\n","- **Higher values (8-15)**: More comprehensive, detailed responses\n","\n","### Node Postprocessors\n","- **SimilarityPostprocessor**: Filters out low-relevance documents\n","\n","### Similarity Cutoff (0.0-1.0)\n","- **0.1-0.3**: More permissive, includes potentially relevant docs\n","- **0.5-0.8**: More strict, only highly relevant docs\n","\n","### Response Synthesizers\n","- **TreeSummarize**: Hierarchical summarization, good for complex topics\n","- **Refine**: Iterative refinement, builds detailed responses\n","- **CompactAndRefine**: Efficient version of Refine\n","- **Default**: Standard synthesis approach\n"]},{"cell_type":"markdown","metadata":{"id":"8oaw_QtBZFvv"},"source":["## โœ… Assignment Completion Checklist\n","\n","Before submitting, ensure you have:\n","\n","- [ ] Set up your OPENROUTER_API_KEY environment variable\n","- [ ] Imported all necessary libraries including advanced RAG components\n","- [ ] Created AdvancedRAGBackend class with configurable parameters\n","- [ ] Implemented all required methods:\n"," - [ ] `update_settings()` - Updates LLM and chunking parameters\n"," - [ ] `initialize_database()` - Sets up vector database\n"," - [ ] `get_postprocessor()` - Returns selected postprocessor\n"," - [ ] `get_synthesizer()` - Returns selected synthesizer\n"," - [ ] `advanced_query()` - Handles queries with all configuration options\n","- [ ] Created advanced Gradio interface with all required components:\n"," - [ ] Initialize database button\n"," - [ ] Model selection dropdown (gpt-4o, gpt-4o-mini)\n"," - [ ] Temperature slider (0 to 1, step 0.1)\n"," - [ ] Chunk size input (default 512)\n"," - [ ] Chunk overlap input (default 50)\n"," - [ ] Similarity top-k slider (1 to 20, default 5)\n"," - [ ] Node postprocessor multiselect\n"," - [ ] Similarity cutoff slider (0.0 to 1.0, step 0.1, 
default 0.3)\n"," - [ ] Response synthesizer dropdown\n"," - [ ] Query input and submit button\n"," - [ ] Response output\n"," - [ ] Configuration display\n","- [ ] Connected all components to backend functions\n","- [ ] Successfully launched the application\n","- [ ] Tested different parameter combinations\n","- [ ] Verified all configuration options work correctly\n","\n","## ๐ŸŽŠ Congratulations!\n","\n","You've successfully built a professional, production-ready RAG application! You now have:\n","\n","- **Advanced Parameter Control**: Full control over all RAG system parameters\n","- **Professional UI**: Clean, organized interface with proper layout\n","- **Real-time Configuration**: Ability to experiment with different settings\n","- **Production Patterns**: Understanding of how to build scalable AI applications\n","\n","## ๐Ÿš€ Next Steps & Extensions\n","\n","**Potential Enhancements:**\n","1. **Authentication**: Add user login and session management\n","2. **Document Upload**: Allow users to upload their own documents\n","3. **Chat History**: Implement conversation memory\n","4. **Performance Monitoring**: Add response time and quality metrics\n","5. **A/B Testing**: Compare different configurations side-by-side\n","6. **Export Features**: Download responses and configurations\n","7. 
**Advanced Visualizations**: Show document similarity scores and retrieval paths\n","\n","**Deployment Options:**\n","- **Local**: Run on your machine for development\n","- **Gradio Cloud**: Deploy with `interface.launch(share=True)`\n","- **Hugging Face Spaces**: Deploy to Hugging Face for public access\n","- **Docker**: Containerize for scalable deployment\n","- **Cloud Platforms**: Deploy to AWS, GCP, or Azure\n","\n","You're now ready to build sophisticated AI-powered applications!\n"]}],"metadata":{"kernelspec":{"display_name":"accelerator","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"colab":{"provenance":[]},"widgets":{"application/vnd.jupyter.widget-state+json":{"e555075b3e35411da8a8fa2a151d071c":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_cacc05334346456fba8750702fa72804","IPY_MODEL_1e0b3679aacd4fee8f374db0a9c80c6e","IPY_MODEL_6895b036178344a8b4288bc9622beee9"],"layout":"IPY_MODEL_76146673913a4dfc8058c1ba15b5dc0a"}},"cacc05334346456fba8750702fa72804":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_afe9e823311d4f8f98fe74c09b4bc65c","placeholder":"โ€‹","style":"IPY_MODEL
_52328b934aa64d0ea8f48d5ee08c4d6f","value":"modules.json:โ€‡100%"}},"1e0b3679aacd4fee8f374db0a9c80c6e":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_2581605a93414b8fad1c6917c9e62041","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_bbf6c319eb91480fa1dd9a735b62117d","value":349}},"6895b036178344a8b4288bc9622beee9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5c27b0f135394065919c1aa99328bb74","placeholder":"โ€‹","style":"IPY_MODEL_e7f0690ca1c94934976ec486dc37072f","value":"โ€‡349/349โ€‡[00:00<00:00,โ€‡28.3kB/s]"}},"76146673913a4dfc8058c1ba15b5dc0a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template
_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"afe9e823311d4f8f98fe74c09b4bc65c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"52328b934aa64d0ea8f48d5ee08c4d6f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2581605a93414b8fad1c6917c9e62041":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.
2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bbf6c319eb91480fa1dd9a735b62117d":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5c27b0f135394065919c1aa99328bb74":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify
_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e7f0690ca1c94934976ec486dc37072f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5da85a1d4aec45f4a7d438187785f2e7":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_df63701c3c9540278f9e1f9ec72a05cf","IPY_MODEL_007f597dd2e74066b2749182d3e77cbd","IPY_MODEL_13231d4eb7b24594997fdad377e9a39f"],"layout":"IPY_MODEL_0749219f0b4f4d5685b5d1fa2744e5c0"}},"df63701c3c9540278f9e1f9ec72a05cf":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0b6f2f52082f44ecb213ad01fda25649","placeholder":"โ€‹","style":"IPY_MODEL_554fe1d4de3b4140a1bc8bc73f32aad5","value":"config_sentence_transformers.json:โ€‡100%"}},"007f597dd2e74066b2749182d3e77cbd":{"model_module":"@jupyter-widgets/controls","model_name":"Float
ProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_9c0700eb9c1345219ead5383c7a5c7c4","max":124,"min":0,"orientation":"horizontal","style":"IPY_MODEL_0b2a765d805342d0a2b05c6da90d8db4","value":124}},"13231d4eb7b24594997fdad377e9a39f":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e955920cb26a440aad247bd03928a99d","placeholder":"โ€‹","style":"IPY_MODEL_85e9c9e790b04f3fb0babf5d36697b57","value":"โ€‡124/124โ€‡[00:00<00:00,โ€‡11.7kB/s]"}},"0749219f0b4f4d5685b5d1fa2744e5c0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,
"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0b6f2f52082f44ecb213ad01fda25649":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"554fe1d4de3b4140a1bc8bc73f32aad5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"9c0700eb9c1345219ead5383c7a5c7c4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":nu
ll,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0b2a765d805342d0a2b05c6da90d8db4":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e955920cb26a440aad247bd03928a99d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"ov
erflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"85e9c9e790b04f3fb0babf5d36697b57":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"826e3e3fdd7c44f8ae069ac8cb179b57":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_88d753427218445e8970c92618c9e8ac","IPY_MODEL_b9df30b27c4d47dca2b18340de6f3b56","IPY_MODEL_3be7ef3867e54a7d98fc7b97de945d35"],"layout":"IPY_MODEL_b728eeea9bbb4d6f8420486e76a27ed8"}},"88d753427218445e8970c92618c9e8ac":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2d4f4bda9d1741b7a1fd6949a3570e3c","placeholder":"โ€‹","style":"IPY_MODEL_5d7ed8c9b4bc4129baec1ce03527bd10","value":"README.md:โ€‡"}},"b9df30b27c4d47dca2b18340de6f3b56":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_cou
nt":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_4c3ad25ebbf54603b5d3e9313c742ac5","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2ce5e030c5824644bd20b0f997faf3ff","value":1}},"3be7ef3867e54a7d98fc7b97de945d35":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2bdbf39069564c43a84eb87813cae3ab","placeholder":"โ€‹","style":"IPY_MODEL_df80b039fd1f4771a0e71bfafc219033","value":"โ€‡94.8k/?โ€‡[00:00<00:00,โ€‡4.38MB/s]"}},"b728eeea9bbb4d6f8420486e76a27ed8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2d4f4bda9d1741b7a1f
d6949a3570e3c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5d7ed8c9b4bc4129baec1ce03527bd10":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4c3ad25ebbf54603b5d3e9313c742ac5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,
"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"2ce5e030c5824644bd20b0f997faf3ff":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2bdbf39069564c43a84eb87813cae3ab":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"df80b039fd1f4771a0e71bfafc219033":{"model_module":"@jupyter-widgets/contro
ls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b5451d4ea98245509c782b8fe1d25cfc":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_efb15bc3d2524078ae47ed2b17099e2c","IPY_MODEL_ce3a6a42427b47ddb8dec0b86b803e9d","IPY_MODEL_e4e9564b34ff49eba064a27481b5d1a7"],"layout":"IPY_MODEL_f1d99a3490034b77a72732c83785064d"}},"efb15bc3d2524078ae47ed2b17099e2c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_49f0e8f020a2492ba68e874f4c00fe67","placeholder":"โ€‹","style":"IPY_MODEL_04a5bde86d3b4059a1e1f8a55199c7d8","value":"sentence_bert_config.json:โ€‡100%"}},"ce3a6a42427b47ddb8dec0b86b803e9d":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null
,"layout":"IPY_MODEL_366da23ff9c2454daddcb04eb9f416c0","max":52,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b6b286c77de44f869f22e1fc3773005c","value":52}},"e4e9564b34ff49eba064a27481b5d1a7":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b58a7568a3f94b56876e77ed3576fe76","placeholder":"โ€‹","style":"IPY_MODEL_6d82cf3cdf9e4779975a7849593c5e3a","value":"โ€‡52.0/52.0โ€‡[00:00<00:00,โ€‡4.65kB/s]"}},"f1d99a3490034b77a72732c83785064d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"49f0e8f020a2492ba68e874f4c00fe67":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_mo
dule_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"04a5bde86d3b4059a1e1f8a55199c7d8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"366da23ff9c2454daddcb04eb9f416c0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"j
ustify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b6b286c77de44f869f22e1fc3773005c":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"b58a7568a3f94b56876e77ed3576fe76":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6d82cf3cdf9e4779975a7849593c5e3a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"De
scriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c218eae8906949508dc7fdd629c9e6c3":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_b6b8c9d1dba7435582d156aee18505a4","IPY_MODEL_9bd3f34817134982bdee81d46a839ae5","IPY_MODEL_5bbc689e0eb3486595f41c1aed1351ea"],"layout":"IPY_MODEL_03a5f56c249448cb8a9c00963576195c"}},"b6b8c9d1dba7435582d156aee18505a4":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1a5c47d3aef547308d8cf4178b3d277d","placeholder":"โ€‹","style":"IPY_MODEL_c4ba4f2a1daf4233a94e64462601d006","value":"config.json:โ€‡100%"}},"9bd3f34817134982bdee81d46a839ae5":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_cc94e340844d461caac8491af4171a0f","max":743,"min":0,"orientation":"horizontal","style":"IPY_MODEL_6b179591713b436c9627d102187dd0e2","value":743}},"5bbc689e0eb3486595f4
1c1aed1351ea":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ec3fd1520af74520a7a6a5a00fc78035","placeholder":"โ€‹","style":"IPY_MODEL_f4f62182f094460baf830448f3d016a5","value":"โ€‡743/743โ€‡[00:00<00:00,โ€‡67.3kB/s]"}},"03a5f56c249448cb8a9c00963576195c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1a5c47d3aef547308d8cf4178b3d277d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"a
lign_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c4ba4f2a1daf4233a94e64462601d006":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"cc94e340844d461caac8491af4171a0f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"ov
erflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6b179591713b436c9627d102187dd0e2":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ec3fd1520af74520a7a6a5a00fc78035":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f4f62182f094460baf830448f3d016a5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bd76ef35f6034a0faa3eafdd922
68b65":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_1b909e7746564a06af03d038b1f9a028","IPY_MODEL_7bfbc726c13343c9af40bc4843cf6e88","IPY_MODEL_a014631e0033471a8281112845e82e77"],"layout":"IPY_MODEL_1bbae592d24d4af9b635c6c01a783b71"}},"1b909e7746564a06af03d038b1f9a028":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_7f6d51e4097347219ee26cfc8bfc7487","placeholder":"โ€‹","style":"IPY_MODEL_8bf2267284124019b5db59f5b74ec0fb","value":"model.safetensors:โ€‡100%"}},"7bfbc726c13343c9af40bc4843cf6e88":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_fc3b98227c2340519a308e83456d0548","max":133466304,"min":0,"orientation":"horizontal","style":"IPY_MODEL_3be865e0fbe1479d950f208ba22c697c","value":133466304}},"a014631e0033471a8281112845e82e77":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-wid
gets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_59ba8c325ccb45f9b61b973f19926d33","placeholder":"โ€‹","style":"IPY_MODEL_7aad31231ba54d6183b865df565d2f15","value":"โ€‡133M/133Mโ€‡[00:01<00:00,โ€‡119MB/s]"}},"1bbae592d24d4af9b635c6c01a783b71":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7f6d51e4097347219ee26cfc8bfc7487":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":nul
l,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8bf2267284124019b5db59f5b74ec0fb":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"fc3b98227c2340519a308e83456d0548":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3be865e0fbe1479d950f208ba22c697c":{"model_module":"@jupyter-wid
gets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"59ba8c325ccb45f9b61b973f19926d33":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7aad31231ba54d6183b865df565d2f15":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"022a6a5527534d6492f1853944362cab":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/c
ontrols","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_dd05f368aaae4881beda9caf48beb67d","IPY_MODEL_5c8b5945e01743719b75550e8ae2234d","IPY_MODEL_cb37c858fa744c64ab659ff78e3a9fb7"],"layout":"IPY_MODEL_5e108b1d57e24a48938cd4fdc4e2f90d"}},"dd05f368aaae4881beda9caf48beb67d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_7575469fb0a34897bffa23cf6e1959ee","placeholder":"โ€‹","style":"IPY_MODEL_ddce348992f1480cacede7855bd69e0b","value":"tokenizer_config.json:โ€‡100%"}},"5c8b5945e01743719b75550e8ae2234d":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ccf49349f1b54a45986d54df8cdf06a6","max":366,"min":0,"orientation":"horizontal","style":"IPY_MODEL_a4e3d8eaae084d41a49aece4ce89cb31","value":366}},"cb37c858fa744c64ab659ff78e3a9fb7":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name
":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ad7b2c8da94a4125adcae2d18ff632b6","placeholder":"โ€‹","style":"IPY_MODEL_5f02b68046014a6b8482a3814c5ee44a","value":"โ€‡366/366โ€‡[00:00<00:00,โ€‡19.0kB/s]"}},"5e108b1d57e24a48938cd4fdc4e2f90d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7575469fb0a34897bffa23cf6e1959ee":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justif
y_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ddce348992f1480cacede7855bd69e0b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ccf49349f1b54a45986d54df8cdf06a6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a4e3d8eaae084d41a49aece4ce89cb31":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name"
:"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ad7b2c8da94a4125adcae2d18ff632b6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5f02b68046014a6b8482a3814c5ee44a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bc86fc01faf04f0b8e8982dde6aebfee":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBox
View","box_style":"","children":["IPY_MODEL_22ed11bcf43c41f2a6813eae2c5fbfb5","IPY_MODEL_2f6e11fed22842b1a57153781b773758","IPY_MODEL_fcb7052b94fc47f1b7734de1568e17f0"],"layout":"IPY_MODEL_42ad5a129ec844eebe6009fbaeb8c475"}},"22ed11bcf43c41f2a6813eae2c5fbfb5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_61cda27f064b484aa9446e74cef059d2","placeholder":"โ€‹","style":"IPY_MODEL_fc94e5ee2e5e40a7abfc8595fa1b8bba","value":"vocab.txt:โ€‡"}},"2f6e11fed22842b1a57153781b773758":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_9ced3f5dc0c341e39f156a51a6de3162","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_4283b4daac544179ba8d2ac59a6a890a","value":1}},"fcb7052b94fc47f1b7734de1568e17f0":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_10f5e0c42d864e2282a393cf169df9b8","placeholder":"โ€‹","style":"IPY_MODEL_2df9ec0f953445d397128e084c33c738","value":"โ€‡23
2k/?โ€‡[00:00<00:00,โ€‡3.62MB/s]"}},"42ad5a129ec844eebe6009fbaeb8c475":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"61cda27f064b484aa9446e74cef059d2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":n
ull,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fc94e5ee2e5e40a7abfc8595fa1b8bba":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"9ced3f5dc0c341e39f156a51a6de3162":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"4283b4daac544179ba8d2ac59a6a890a":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"10f5e0c42d864e2282
a393cf169df9b8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2df9ec0f953445d397128e084c33c738":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"01e6ba1052dc4b939e2116563f3b2df5":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_9bb7052ed725400ab5a0a050a3f62e51","IPY_MODEL_5bec150283b340ee8c30834c5a1fbaa5","IPY_MODEL_ed69cd4fb29744719b6291e34cdcc8ef"],"layout":"IPY_MODEL_cf994cff
745c40b986c387f5bdf1a429"}},"9bb7052ed725400ab5a0a050a3f62e51":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_dee358c514304f40a092cfddee6cd4ff","placeholder":"โ€‹","style":"IPY_MODEL_0696f75acf1f4014a565de28ae7c8462","value":"tokenizer.json:โ€‡"}},"5bec150283b340ee8c30834c5a1fbaa5":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_d7347f2c6fa2425588106b7e2a9699f0","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b2d78955677045ef8b43f934aaabfd97","value":1}},"ed69cd4fb29744719b6291e34cdcc8ef":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_61356446b8174f93b6c045e7c0741b0b","placeholder":"โ€‹","style":"IPY_MODEL_005afce015354a6fa0dacf9a01bba361","value":"โ€‡711k/?โ€‡[00:00<00:00,โ€‡29.1MB/s]"}},"cf994cff745c40b986c387f5bdf1a429":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module
":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dee358c514304f40a092cfddee6cd4ff":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0696f75acf1f4014a565de28ae7c8462":{"model_module":"@jupyter-widgets/controls","
model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"d7347f2c6fa2425588106b7e2a9699f0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"b2d78955677045ef8b43f934aaabfd97":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"61356446b8174f93b6c045e7c0741b0b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.
0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"005afce015354a6fa0dacf9a01bba361":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a999fe81cf984effb2f621da0b338823":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_27e0a306b76d4ec4a50290ccfdd7755a","IPY_MODEL_539f802d01ef45ecb3c750616d36f3e8","IPY_MODEL_057558df75e947a8af2e5a4538d3c26c"],"layout":"IPY_MODEL_ae265553fe7d4b33b131c6d46ce02f44"}},"27e0a306b76d4ec4a50290ccfdd7755a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_
model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_00dd28179d6a41d8812aafda5135507c","placeholder":"โ€‹","style":"IPY_MODEL_5cb1be0cd24141f1a32e86b9ea09ec86","value":"special_tokens_map.json:โ€‡100%"}},"539f802d01ef45ecb3c750616d36f3e8":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_282fa42f92e84c4c9a2a886d5d85df4a","max":125,"min":0,"orientation":"horizontal","style":"IPY_MODEL_54382bef09384ff29564c3d7b6c5ea89","value":125}},"057558df75e947a8af2e5a4538d3c26c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e6d8e6a9d954452bb8adb8d3d101939d","placeholder":"โ€‹","style":"IPY_MODEL_5df80152d588453ea46ae6c73eac0a19","value":"โ€‡125/125โ€‡[00:00<00:00,โ€‡11.9kB/s]"}},"ae265553fe7d4b33b131c6d46ce02f44":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0"
,"_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"00dd28179d6a41d8812aafda5135507c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5cb1be0cd24141f1a32e86b9ea09ec86":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"Descrip
tionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"282fa42f92e84c4c9a2a886d5d85df4a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"54382bef09384ff29564c3d7b6c5ea89":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e6d8e6a9d954452bb8adb8d3d101939d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_it
ems":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5df80152d588453ea46ae6c73eac0a19":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"105d74d437fd4d19a2beab025055b969":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_58679b3e995b413298ccceba14c5cee5","IPY_MODEL_702bc5c5d42a48faa14ee2a661471b2c","IPY_MODEL_827b227da03a4f39b4eebf2ebca7f147"],"layout":"IPY_MODEL_3b03d89f230747c799418596fdd038cb"}},"58679b3e995b413298ccceba14c5cee5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_
version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_c6e4f05eceea4daa9f9ed046faa160f6","placeholder":"โ€‹","style":"IPY_MODEL_42e89191795b4c82b3b3970f638950ae","value":"config.json:โ€‡100%"}},"702bc5c5d42a48faa14ee2a661471b2c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3108cef227b24e0499f26f823e89d1f3","max":190,"min":0,"orientation":"horizontal","style":"IPY_MODEL_f883d8d9f738476887c97c94dcf184df","value":190}},"827b227da03a4f39b4eebf2ebca7f147":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_999f8bcc0b1d4083922105f549167f1b","placeholder":"โ€‹","style":"IPY_MODEL_8a1597c0aaa84bb29042275aae7de6f2","value":"โ€‡190/190โ€‡[00:00<00:00,โ€‡18.3kB/s]"}},"3b03d89f230747c799418596fdd038cb":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_col
umns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c6e4f05eceea4daa9f9ed046faa160f6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"42e89191795b4c82b3b3970f638950ae":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3108cef227b24e0499f26f823e89d1f3
":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f883d8d9f738476887c97c94dcf184df":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"999f8bcc0b1d4083922105f549167f1b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"g
rid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8a1597c0aaa84bb29042275aae7de6f2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Monalisa_Samal/README.md b/Monalisa_Samal/README.md deleted file mode 100644 index 417844f..0000000 --- a/Monalisa_Samal/README.md +++ /dev/null @@ -1 +0,0 @@ -# Monalisa_Samal diff --git a/Monalisa_Samal/assignment_1_vector_db_basics.ipynb b/Monalisa_Samal/assignment_1_vector_db_basics.ipynb deleted file mode 100644 index ee1c097..0000000 --- a/Monalisa_Samal/assignment_1_vector_db_basics.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"rX4VtY8Gc4Aa"},"source":["# Assignment 1: Vector Database Creation and Retrieval\n","## Day 6 Session 2 - RAG Fundamentals\n","\n","**OBJECTIVE:** Create a vector database from a folder of documents and implement basic retrieval functionality.\n","\n","**LEARNING GOALS:**\n","- Understand document loading with SimpleDirectoryReader\n","- Learn vector store setup with LanceDB\n","- Implement vector index creation\n","- Perform semantic search and retrieval\n","\n","**DATASET:** Use the data folder in `Day_6/session_2/data/` which contains multiple file 
types\n","\n","**INSTRUCTIONS:**\n","1. Complete each function by replacing the TODO comments with actual implementation\n","2. Run each cell after completing the function to test it\n","3. The answers can be found in the existing notebooks in the `llamaindex_rag/` folder\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"mGtEGJgKdEty","executionInfo":{"status":"ok","timestamp":1762068951338,"user_tz":-330,"elapsed":31991,"user":{"displayName":"","userId":""}},"outputId":"3f9858b0-fd42-41ae-bddc-ed6cbd3baca5"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["# If it's in a specific folder (e.g., \"Projects/MyProject/\")\n","!pip install -r '/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt'"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"L4iAVZ0Yf0g-","executionInfo":{"status":"ok","timestamp":1762069356067,"user_tz":-330,"elapsed":49537,"user":{"displayName":"","userId":""}},"outputId":"809ee24f-a3e1-4d97-c939-fab5baf825ff"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages 
(from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 5)) (0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m 
\u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m15.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m9.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already 
satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) 
(0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in 
/usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading 
llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: 
more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in 
/usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages 
(from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in 
/usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading 
wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 
in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from 
weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading 
llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata 
(3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 
12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m35.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl 
(4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m101.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m65.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m83.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m20.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m12.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m95.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m4.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m21.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m15.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m10.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading 
colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=be017bdd70010807e561385dfd0514f077b15200b867f9378992997bd48a9fba\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 
llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"bf6145c7209b49e19021b33f4c254a27"}},"metadata":{}}]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"gtmrrU8ic4Ad","executionInfo":{"status":"ok","timestamp":1762069483035,"user_tz":-330,"elapsed":51999,"user":{"displayName":"","userId":""}},"outputId":"eff993f8-ffe4-47bc-a240-6b7070bb2425"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Libraries imported successfully!\n"]}],"source":["# Import required libraries\n","import os\n","from pathlib import Path\n","from typing import List\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","\n","print(\"โœ… Libraries imported 
successfully!\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"IhBUV3rEc4Ae","executionInfo":{"status":"ok","timestamp":1762069961873,"user_tz":-330,"elapsed":2079,"user":{"displayName":"","userId":""}},"outputId":"7add2c19-d99f-49d6-e07d-e87cdc20e07d"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… OpenRouter API key found in Colab secrets\n","โœ… LlamaIndex configured with local embeddings\n"," Using BAAI/bge-small-en-v1.5 for document embeddings\n"]}],"source":["# Configure LlamaIndex Settings (Using OpenRouter - No OpenAI API Key needed)\n","# Configure LlamaIndex Settings (Using OpenRouter - No OpenAI API Key needed)\n","def setup_llamaindex_settings():\n"," \"\"\"\n"," Configure LlamaIndex with local embeddings and OpenRouter for LLM.\n"," This assignment focuses on vector database operations, so we'll use local embeddings only.\n"," \"\"\"\n"," # Check for OpenRouter API key (for future use, not needed for this basic assignment)\n"," from google.colab import userdata\n","\n"," try:\n"," api_key = userdata.get('OPEN_ROUTER') # or whatever you named your secret\n"," print(\"โœ… OpenRouter API key found in Colab secrets\")\n"," except Exception:\n"," print(\"โ„น๏ธ OPENROUTER_API_KEY not found - that's OK for this assignment!\")\n"," print(\" This assignment only uses local embeddings for vector operations.\")\n","\n"," # Configure local embeddings (no API key required)\n"," Settings.embed_model = HuggingFaceEmbedding(\n"," model_name=\"BAAI/bge-small-en-v1.5\",\n"," trust_remote_code=True\n"," )\n","\n"," print(\"โœ… LlamaIndex configured with local embeddings\")\n"," print(\" Using BAAI/bge-small-en-v1.5 for document embeddings\")\n","\n","# Setup the configuration\n","setup_llamaindex_settings()"]},{"cell_type":"markdown","metadata":{"id":"tgAR2pDkc4Af"},"source":["## 1. 
Document Loading Function\n","\n","Complete the function below to load documents from a folder using `SimpleDirectoryReader`.\n","\n","**Note:** This assignment uses local embeddings only - no OpenAI API key required! We're configured to use OpenRouter for future LLM operations.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"afKvr3CHc4Ag","executionInfo":{"status":"ok","timestamp":1762070845711,"user_tz":-330,"elapsed":65372,"user":{"displayName":"","userId":""}},"outputId":"440cfccb-19b3-4b8a-fa53-556cea45660f"},"outputs":[{"output_type":"stream","name":"stderr","text":["100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 139M/139M [00:00<00:00, 157MiB/s]\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n"]},{"output_type":"stream","name":"stdout","text":["TODO: Load documents from /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/data\n","Loaded 42 documents\n"]}],"source":["from llama_index.core import SimpleDirectoryReader\n","\n","def load_documents_from_folder(folder_path: str):\n"," \"\"\"\n"," Load documents from a folder using SimpleDirectoryReader.\n","\n"," TODO: Complete this function to load documents from the given folder path.\n"," HINT: Use SimpleDirectoryReader with recursive parameter to load all files\n","\n"," Args:\n"," folder_path 
(str): Path to the folder containing documents\n","\n"," Returns:\n"," List of documents loaded from the folder\n"," \"\"\"\n"," # TODO: Create SimpleDirectoryReader instance\n"," reader = SimpleDirectoryReader(input_dir=folder_path, recursive=True)\n","\n"," # TODO: Load and return documents\n"," documents = reader.load_data()\n","\n","\n"," # PLACEHOLDER - Replace with actual implementation\n"," print(f\"TODO: Load documents from {folder_path}\")\n","\n"," return documents\n","\n","# Test the function after you complete it\n","test_folder = \"/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/data\"\n","documents = load_documents_from_folder(test_folder)\n","print(f\"Loaded {len(documents)} documents\")\n"]},{"cell_type":"markdown","metadata":{"id":"DZ2L7z7ac4Ag"},"source":["## 2. Vector Store Creation Function\n","\n","Complete the function below to create a LanceDB vector store.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"m0KBU3uUc4Ah","executionInfo":{"status":"ok","timestamp":1762071429191,"user_tz":-330,"elapsed":323,"user":{"displayName":"","userId":""}},"outputId":"0af919ab-ec14-456e-dfff-f526b3e166b3"},"outputs":[{"output_type":"stream","name":"stderr","text":["WARNING:llama_index.vector_stores.lancedb.base:Table documents doesn't exist yet. 
Please add some data to create it.\n"]},{"output_type":"stream","name":"stdout","text":["TODO: Create vector store at ./assignment_vectordb\n","Vector store created: True\n"]}],"source":["def create_vector_store(db_path: str = \"./vectordb\", table_name: str = \"documents\"):\n"," \"\"\"\n"," Create a LanceDB vector store for storing document embeddings.\n","\n"," TODO: Complete this function to create and configure a LanceDB vector store.\n"," HINT: Use LanceDBVectorStore with uri and table_name parameters\n","\n"," Args:\n"," db_path (str): Path where the vector database will be stored\n"," table_name (str): Name of the table in the vector database\n","\n"," Returns:\n"," LanceDBVectorStore: Configured vector store\n"," \"\"\"\n"," # TODO: Create the directory if it doesn't exist\n"," Path(db_path).mkdir(parents=True, exist_ok=True)\n","\n"," # TODO: Create vector store\n"," vector_store = LanceDBVectorStore (\n"," uri=db_path,\n"," table_name=table_name\n"," )\n","\n"," # PLACEHOLDER - Replace with actual implementation\n"," print(f\"TODO: Create vector store at {db_path}\")\n","\n"," return vector_store\n","\n","\n","\n","# Test the function after you complete it\n","vector_store = create_vector_store(\"./assignment_vectordb\")\n","print(f\"Vector store created: {vector_store is not None}\")\n"]},{"cell_type":"markdown","metadata":{"id":"g3X5bjtIc4Ah"},"source":["## 3. 
Vector Index Creation Function\n","\n","Complete the function below to create a vector index from documents.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":116,"referenced_widgets":["c800f798befc49ec85ac21296ae3ede3","b6b184be73b64f52b5229efca5b7b535","a06c7b0401ab42658f5fceeb02719936","4c709bca2e5b45e18c1052b1443040b9","a2dd38f7ea6a4b0aaf4fa9d9d75bb2f7","7fb84c2a901a4d298d3733a3e3067a07","288422a6ec0d4266b9aa42f9a813fd1f","ed98766b22fb44f1a9b4099e6f5c6e8b","7381b62a3d3b4040adf365e00ce5d0ee","bc98f581514549cabc2bac83d017cf5a","f368138346d04fffb0a7fa1faa5986ec","9a3658c2ed064f579537fe277cc81243","5e1d55b3cf2a442e82108bedb94ac345","e0e0fef0e6934b728298d880f3fcfc24","d0dd192a6edb42d2ab6c3e1e96843a6a","2d06766fb0f14899a1c96b11bf04beb3","2b1872425a894d6298193dfa433fe76d","c50c88d75b9b41fdaa8d66fc24b3391f","2619ccd9ecb14392a78eb8469fd7de39","bef8337bdbcf451995c6ae2db1c94e68","b8a2dc32c1d44f3ebe8fa919aa69ccdc","4fb1db5731ed4e1dbf9f0c159febd03f"]},"id":"cDeK2JhSc4Ai","executionInfo":{"status":"ok","timestamp":1762071914576,"user_tz":-330,"elapsed":42292,"user":{"displayName":"","userId":""}},"outputId":"9cd990d6-1874-46ea-9a12-4e1afd55750a"},"outputs":[{"output_type":"display_data","data":{"text/plain":["Parsing nodes: 0%| | 0/42 [00:00 402\u001b[0;31m \u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraise_for_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 403\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mHTTPError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/requests/models.py\u001b[0m in \u001b[0;36mraise_for_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1025\u001b[0m \u001b[0;32mif\u001b[0m 
\u001b[0mhttp_error_msg\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1026\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mHTTPError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhttp_error_msg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1027\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mHTTPError\u001b[0m: 401 Client Error: Unauthorized for url: https://huggingface.co/api/whoami-v2","\nThe above exception was the direct cause of the following exception:\n","\u001b[0;31mHfHubHTTPError\u001b[0m Traceback (most recent call last)","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/hf_api.py\u001b[0m in \u001b[0;36mwhoami\u001b[0;34m(self, token)\u001b[0m\n\u001b[1;32m 1799\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1800\u001b[0;31m \u001b[0mhf_raise_for_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1801\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mHTTPError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_http.py\u001b[0m in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 474\u001b[0m \u001b[0;31m# as well (request id and/or server error message)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 475\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0m_format\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mHfHubHTTPError\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 476\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mHfHubHTTPError\u001b[0m: 401 Client Error: Unauthorized for url: https://huggingface.co/api/whoami-v2 (Request ID: Root=1-69075383-79acb4c72606ee2b66a2018e;4b91b747-f307-4671-9ee1-87bbb3f6fe74)\n\nInvalid credentials in Authorization header","\nThe above exception was the direct cause of the following exception:\n","\u001b[0;31mHTTPError\u001b[0m Traceback (most recent call last)","\u001b[0;32m/tmp/ipython-input-2265489888.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;31m# Use with Hugging Face libraries\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mhuggingface_hub\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mlogin\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0mlogin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtoken\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhf_token\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_deprecation.py\u001b[0m in \u001b[0;36minner_f\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 99\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;34m\"\\n\\n\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mcustom_message\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 100\u001b[0m \u001b[0mwarnings\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwarn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mFutureWarning\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 101\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 102\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0minner_f\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_deprecation.py\u001b[0m in \u001b[0;36minner_f\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0mextra_args\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mall_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mextra_args\u001b[0m \u001b[0;34m<=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 31\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 32\u001b[0m \u001b[0;31m# extra_args > 0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m args_msg = [\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/_login.py\u001b[0m in \u001b[0;36mlogin\u001b[0;34m(token, add_to_git_credential, new_session, write_permission)\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0;34m\"you want to set the 
git credential as well.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m )\n\u001b[0;32m--> 120\u001b[0;31m \u001b[0m_login\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtoken\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0madd_to_git_credential\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0madd_to_git_credential\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 121\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mis_notebook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 122\u001b[0m \u001b[0mnotebook_login\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnew_session\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnew_session\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/_login.py\u001b[0m in \u001b[0;36m_login\u001b[0;34m(token, add_to_git_credential)\u001b[0m\n\u001b[1;32m 396\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"You must use your personal account token, not an organization token.\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 398\u001b[0;31m \u001b[0mtoken_info\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwhoami\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtoken\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 399\u001b[0m \u001b[0mpermission\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtoken_info\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"auth\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"accessToken\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"role\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 400\u001b[0m 
\u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"Token is valid (permission: {permission}).\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_validators.py\u001b[0m in \u001b[0;36m_inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msmoothly_deprecate_use_auth_token\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhas_token\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhas_token\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 113\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 114\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 115\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_inner_fn\u001b[0m \u001b[0;31m# type: ignore\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/hf_api.py\u001b[0m in \u001b[0;36mwhoami\u001b[0;34m(self, token)\u001b[0m\n\u001b[1;32m 1812\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0meffective_token\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0m_get_token_from_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1813\u001b[0m \u001b[0merror_message\u001b[0m 
\u001b[0;34m+=\u001b[0m \u001b[0;34m\" The token stored is invalid. Please run `hf auth login` to update it.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1814\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mHTTPError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0merror_message\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrequest\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1815\u001b[0m \u001b[0;32mraise\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1816\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjson\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mHTTPError\u001b[0m: Invalid user token. The token from Google Colab vault is invalid. Please update it from the UI."]}]},{"cell_type":"markdown","metadata":{"id":"En2F-NY_c4Aj"},"source":["## 5. 
Final Test - Complete Pipeline\n","\n","Once you've completed all the functions above, run this cell to test the complete pipeline with multiple search queries.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":986,"referenced_widgets":["427e4b135571428fa45bbc1684856feb","6ee97068ef7b45e9a3886ab57b783a62","5eff01a40dbe40f3bd4b495fe52e6b36","8a7eb64cae22471787346d0b8fa86d25","6223159a14db42b5a8ad8ab9c6fc9b17","bf26c6f946a9479c89d02dedd73f6a20","8e98cab9140a4910a45033c3463eede1","8df3dcb6ea8f47f58c01f8d7417a35c7","ab59301ad2ea4f9bb90daf49d9c4e179","8e9f91584fa346449b267d3feb8d1de9","be0f0f881eca4f0799cbb541394b4867","56bf583baaf8410793f299b92f4e404a","bac0bf1f765a41819f13d92d130911fa","4c79b48a43b9486fa3fd89de29d1b585","a0c79394e0f74ce7a64274f8259ba07e","f28fcef72b384819853aab8ddc0fa588","3ce9e43b6a784ae0a90a0bb87f51d2c4","d3230baafa0a4e1f91f1ad9c11087b92","4a01d57a70a546c0b147a3021f1a4702","49dcb29cc0214e83ba0cc8ead707d6b7","94a9026cc60443be952cfd211f66950d","f93e7ec59b4b48beb1e5b15099d8cb8a"]},"id":"C4DysfYGc4Aj","executionInfo":{"status":"ok","timestamp":1762072280240,"user_tz":-330,"elapsed":83787,"user":{"displayName":"","userId":""}},"outputId":"0d6fc2fa-35ff-47f5-b267-1548e3c10ba9"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐Ÿš€ Testing Complete Vector Database Pipeline\n","==================================================\n","\n","๐Ÿ“‚ Step 1: Loading documents...\n"]},{"output_type":"stream","name":"stderr","text":["/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 
instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n"]},{"output_type":"stream","name":"stdout","text":["TODO: Load documents from /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/data\n"," Loaded 42 documents\n","\n","๐Ÿ—„๏ธ Step 2: Creating vector store...\n","TODO: Create vector store at ./assignment_vectordb\n"," Vector store status: โœ… Created\n","\n","๐Ÿ”— Step 3: Creating vector index...\n"]},{"output_type":"display_data","data":{"text/plain":["Parsing nodes: 0%| | 0/42 [00:001.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: 
python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (80.9.0)\n","Requirement already satisfied: jedi>=0.16 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.19.2)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) 
(2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.9.0)\n","Requirement already satisfied: deprecation in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (2.1.0)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (18.1.0)\n","Requirement already satisfied: lance-namespace>=0.0.16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (0.0.20)\n","Requirement already satisfied: llama-index-cli<0.6,>=0.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.5.3)\n","Requirement already satisfied: llama-index-core<0.15.0,>=0.14.7 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.14.7)\n","Requirement already satisfied: llama-index-indices-managed-llama-cloud>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.9.4)\n","Requirement already satisfied: llama-index-llms-openai<0.7,>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) 
(0.6.6)\n","Requirement already satisfied: llama-index-readers-file<0.6,>=0.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.5.4)\n","Requirement already satisfied: llama-index-readers-llama-parse>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.5.1)\n","Requirement already satisfied: pylance in /usr/local/lib/python3.12/dist-packages (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13)) (0.38.3)\n","Requirement already satisfied: tantivy in /usr/local/lib/python3.12/dist-packages (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13)) (0.25.0)\n","Requirement already satisfied: llama-index-llms-openai-like<0.6,>=0.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-llms-openrouter->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17)) (0.5.3)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages 
(from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages 
(from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) 
(0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.1)\n","Requirement already satisfied: lance-namespace-urllib3-client in /usr/local/lib/python3.12/dist-packages (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (0.0.20)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: aiosqlite in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.21.0)\n","Requirement already satisfied: banks<3,>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.2.0)\n","Requirement already satisfied: dataclasses-json in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.6.7)\n","Requirement already satisfied: deprecated>=1.2.9.3 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.18)\n","Requirement already satisfied: dirtyjson<2,>=1.0.8 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.0.8)\n","Requirement 
already satisfied: filetype<2,>=1.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.0)\n","Requirement already satisfied: llama-index-workflows!=2.9.0,<3,>=2 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.10.2)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (4.5.0)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (8.5.0)\n","Requirement already satisfied: typing-inspect>=0.8.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.9.0)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.17.3)\n","Requirement already satisfied: llama-cloud==0.1.35 in /usr/local/lib/python3.12/dist-packages (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.1.35)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.7.1)\n","Requirement already satisfied: pypdf<7,>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (6.1.3)\n","Requirement already satisfied: striprtf<0.0.27,>=0.0.26 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.0.26)\n","Requirement already satisfied: llama-parse>=0.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.6.54)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from 
pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in 
/usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.22.0)\n","Requirement already satisfied: griffe in /usr/local/lib/python3.12/dist-packages (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.14.0)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.1)\n","Requirement already satisfied: llama-index-instrumentation>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.4.2)\n","Requirement already satisfied: llama-cloud-services>=0.6.54 in 
/usr/local/lib/python3.12/dist-packages (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.6.54)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.3.0)\n","Requirement already satisfied: mypy-extensions>=0.3.0 in /usr/local/lib/python3.12/dist-packages (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.1.0)\n","Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /usr/local/lib/python3.12/dist-packages (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.26.1)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from 
markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: colorama>=0.4 in /usr/local/lib/python3.12/dist-packages (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.4.6)\n"]}]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"wlT1dnXjYchO","executionInfo":{"status":"ok","timestamp":1762089698115,"user_tz":-330,"elapsed":10,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"740c93db-ab39-4f37-edeb-93b9c8580141"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Advanced RAG libraries imported successfully!\n"]}],"source":["# Import required libraries for advanced RAG\n","import os\n","from pathlib import Path\n","from typing import Dict, List, Optional, Any\n","from pydantic import BaseModel, Field\n","\n","# Core LlamaIndex components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.core.query_engine import RetrieverQueryEngine\n","from llama_index.core.retrievers import VectorIndexRetriever\n","\n","# Vector store\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","\n","# Embeddings and LLM\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","# Advanced RAG components (we'll use these in the assignments)\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n","from llama_index.core.output_parsers import PydanticOutputParser\n","\n","print(\"โœ… Advanced RAG libraries imported 
successfully!\")\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"R_3uIzeMYchP","executionInfo":{"status":"ok","timestamp":1762089706600,"user_tz":-330,"elapsed":3209,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"42778bab-2cb4-44d2-a7c8-0d03aa468ae9"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… OpenRouter API key found in Colab secrets\n","โœ… Advanced RAG settings configured\n"," - Chunk size: 512 (optimized for precision)\n"," - Using local embeddings for cost efficiency\n"," - OpenRouter LLM ready for response synthesis\n"]}],"source":["# Configure Advanced RAG Settings (Using OpenRouter)\n","def setup_advanced_rag_settings():\n"," \"\"\"\n"," Configure LlamaIndex with optimized settings for advanced RAG.\n"," Uses local embeddings and OpenRouter for LLM operations.\n"," \"\"\"\n"," # Check for OpenRouter API key\n","\n"," from google.colab import userdata\n","\n"," try:\n"," api_key = userdata.get('OPEN_ROUTER') # your named your secret\n"," print(\"โœ… OpenRouter API key found in Colab secrets\")\n"," except Exception:\n"," print(\"โ„น๏ธ OPENROUTER_API_KEY not found - that's OK for this assignment!\")\n"," print(\" This assignment only uses local embeddings for vector operations.\")\n","\n","\n"," # Configure OpenRouter LLM\n"," Settings.llm = OpenRouter(\n"," api_key=api_key,\n"," model=\"gpt-4o\",\n"," temperature=0.1 # Lower temperature for more consistent responses\n"," )\n","\n"," # Configure local embeddings (no API key required)\n"," Settings.embed_model = HuggingFaceEmbedding(\n"," model_name=\"BAAI/bge-small-en-v1.5\",\n"," trust_remote_code=True\n"," )\n","\n"," # Advanced RAG configuration\n"," Settings.chunk_size = 512 # Smaller chunks for better precision\n"," Settings.chunk_overlap = 50\n","\n"," print(\"โœ… Advanced RAG settings configured\")\n"," print(\" - Chunk size: 512 (optimized for precision)\")\n"," print(\" - Using 
local embeddings for cost efficiency\")\n"," print(\" - OpenRouter LLM ready for response synthesis\")\n","\n","# Setup the configuration\n","setup_advanced_rag_settings()\n"]},{"cell_type":"code","source":["from google.colab import userdata\n","import os\n","\n","# Get the token from Colab secrets\n","hf_token = userdata.get('HF_TOKEN')\n","\n","# Set as environment variable (optional)\n","os.environ['HF_TOKEN'] = hf_token\n","\n","# Use with Hugging Face libraries\n","from huggingface_hub import login\n","login(token=hf_token)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"J7mg0QC3nmgs","executionInfo":{"status":"ok","timestamp":1762089712808,"user_tz":-330,"elapsed":899,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"4fc230c4-a763-4c1a-b640-85f2e1e67f42"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stderr","text":["Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just configured.\n","WARNING:huggingface_hub._login:Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just 
configured.\n"]}]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":255,"referenced_widgets":["a9d4ddd2138e497f9050b59d4a579687","28265196138e4fc2b71a6f28b6c9ae0c","a341f62d1b374c63b71f478afe421377","1a852a69c3274338bd6634fb0c16a457","d1a87baee2b34ff39294a9957609ae33","0b6a9975c3a84f048b3b34e678c1a3d8","70441a90759e419fbc118642ee3afeb8","e80346f88fa049f9bc4690b520faeadf","2607a80a19ef469a852a574dde7deec9","752bdeb9460947cdb9731e87828b93c7","a9a4ffd40f97484fa39374570f6fd006","18aba1e55c084b45a59d723c920649db","a8395d6fb02e49cb8062308a40db32b8","4d2373e1731b46f6960ae50e48ea2d33","cc18d8a0b8904643a0cd1f13b7ba8bd3","e051a3be3c35478cbd343dbacdd48acf","a011adb7c30245deb082532acfd038cb","4208e397a3a7458aa5b16c91bc7b5ac8","f1c05ad90ee74a1fbb77d618ff193971","656d18ae543a463ea98145be9cc6771e","6141e0010d114134ba65680c86be8720","c86f10069df241d3adfcf0f884a61389"]},"id":"IseBtxKeYchP","executionInfo":{"status":"ok","timestamp":1762089804808,"user_tz":-330,"elapsed":87443,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"467cb814-9deb-4ca1-a7ea-1d41a4dd38f0"},"outputs":[{"output_type":"stream","name":"stderr","text":["/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n","/usr/local/lib/python3.12/dist-packages/whisper/transcribe.py:132: UserWarning: FP16 is not supported on CPU; using FP32 instead\n"," warnings.warn(\"FP16 is not supported on CPU; using FP32 instead\")\n"]},{"output_type":"display_data","data":{"text/plain":["Parsing nodes: 0%| | 0/42 [00:00=0.13.0 in /usr/local/lib/python3.12/dist-packages (from 
llama-index-llms-huggingface) (0.14.7)\n","Requirement already satisfied: torch<3,>=2.1.2 in /usr/local/lib/python3.12/dist-packages (from llama-index-llms-huggingface) (2.8.0+cu126)\n","Requirement already satisfied: transformers<5,>=4.37.0 in /usr/local/lib/python3.12/dist-packages (from transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (4.57.1)\n","Requirement already satisfied: aiohttp<4,>=3.8.6 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.13.1)\n","Requirement already satisfied: aiosqlite in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.21.0)\n","Requirement already satisfied: banks<3,>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.2.0)\n","Requirement already satisfied: dataclasses-json in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.6.7)\n","Requirement already satisfied: deprecated>=1.2.9.3 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.2.18)\n","Requirement already satisfied: dirtyjson<2,>=1.0.8 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.0.8)\n","Requirement already satisfied: filetype<2,>=1.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.2.0)\n","Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2025.3.0)\n","Requirement already satisfied: httpx in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.28.1)\n","Requirement already satisfied: llama-index-workflows!=2.9.0,<3,>=2 in 
/usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.10.2)\n","Requirement already satisfied: nest-asyncio<2,>=1.5.8 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.6.0)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.5)\n","Requirement already satisfied: nltk>3.8.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.0.2)\n","Requirement already satisfied: pillow>=9.0.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (11.3.0)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (4.5.0)\n","Requirement already satisfied: pydantic>=2.8.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.11.10)\n","Requirement already satisfied: pyyaml>=6.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (6.0.3)\n","Requirement already satisfied: requests>=2.31.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.32.4)\n","Requirement already satisfied: setuptools>=80.9.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (80.9.0)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from 
sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (8.5.0)\n","Requirement already satisfied: tiktoken>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.12.0)\n","Requirement already satisfied: tqdm<5,>=4.66.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (4.67.1)\n","Requirement already satisfied: typing-extensions>=4.5.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (4.15.0)\n","Requirement already satisfied: typing-inspect>=0.8.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.9.0)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.17.3)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (3.20.0)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (1.13.3)\n","Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (3.1.6)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.77)\n","Requirement already satisfied: 
nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from 
torch<3,>=2.1.2->llama-index-llms-huggingface) (1.11.1.6)\n","Requirement already satisfied: triton==3.4.0 in /usr/local/lib/python3.12/dist-packages (from torch<3,>=2.1.2->llama-index-llms-huggingface) (3.4.0)\n","Requirement already satisfied: huggingface-hub<1.0,>=0.34.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (0.36.0)\n","Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (25.0)\n","Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (2024.11.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (0.6.2)\n","Requirement already satisfied: accelerate>=0.26.0 in /usr/local/lib/python3.12/dist-packages (from transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (1.11.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from accelerate>=0.26.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (5.9.5)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.4.0)\n","Requirement already satisfied: 
attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp<4,>=3.8.6->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.22.0)\n","Requirement already satisfied: griffe in /usr/local/lib/python3.12/dist-packages (from banks<3,>=2.2.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.14.0)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<1.0,>=0.34.0->transformers<5,>=4.37.0->transformers[torch]<5,>=4.37.0->llama-index-llms-huggingface) (1.2.0)\n","Requirement already satisfied: llama-index-instrumentation>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.4.2)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk>3.8.1->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk>3.8.1->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.5.2)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages 
(from pydantic>=2.8.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.8.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic>=2.8.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.4.2)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests>=2.31.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.4.4)\n","Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests>=2.31.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.11)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests>=2.31.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2.5.0)\n","Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests>=2.31.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (2025.10.5)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch<3,>=2.1.2->llama-index-llms-huggingface) (1.3.0)\n","Requirement already satisfied: mypy-extensions>=0.3.0 in /usr/local/lib/python3.12/dist-packages (from typing-inspect>=0.8.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.1.0)\n","Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /usr/local/lib/python3.12/dist-packages (from 
dataclasses-json->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (3.26.1)\n","Requirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (4.11.0)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.16.0)\n","Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch<3,>=2.1.2->llama-index-llms-huggingface) (3.0.3)\n","Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.12/dist-packages (from anyio->httpx->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (1.3.1)\n","Requirement already satisfied: colorama>=0.4 in /usr/local/lib/python3.12/dist-packages (from griffe->banks<3,>=2.2.0->llama-index-core<0.15,>=0.13.0->llama-index-llms-huggingface) (0.4.6)\n"]}]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"NXdeLi2NYchP","executionInfo":{"status":"ok","timestamp":1762091882924,"user_tz":-330,"elapsed":2769,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"18aab0d1-d8e5-46de-853f-f1b47f858471"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… OpenAI API key loaded from Colab secrets\n","โœ… OpenAI LLM configured successfully\n","โœ… Query engine with similarity cutoff 0.3 created\n","โœ… Query engine with similarity filtering created\n","\n","๐Ÿ” Testing query: 'What are the benefits of AI agents?'\n","๐Ÿ“ Response: The benefits of AI agents include their enhanced reasoning, planning, and tool execution capabilities, which enable them to achieve complex goals efficiently. 
def create_query_engine_with_similarity_filter(index, similarity_cutoff: float = 0.3, top_k: int = 5):
    """Build a query engine whose retrieved nodes are filtered by similarity score.

    Retrieves ``top_k`` candidate nodes, then discards every node whose
    similarity score falls below ``similarity_cutoff`` using a
    SimilarityPostprocessor before the response is synthesized.

    Args:
        index: Vector index to query.
        similarity_cutoff: Minimum similarity score (0.0 to 1.0).
        top_k: Number of initial results to retrieve before filtering.

    Returns:
        The configured query engine, or None if construction failed.
    """
    try:
        # Postprocessor that drops nodes scoring below the threshold.
        cutoff_filter = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)
        engine = index.as_query_engine(
            similarity_top_k=top_k,
            node_postprocessors=[cutoff_filter],
        )
        print(f"โœ… Query engine with similarity cutoff {similarity_cutoff} created")
        return engine
    except Exception as e:
        print(f"โŒ Error creating query engine: {e}")
        return None
TreeSummarize excels at comprehensive analysis and long-form responses.\n","\n","Complete the function below to create a query engine with TreeSummarize response synthesis.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"th0Kz2rMYchP","executionInfo":{"status":"ok","timestamp":1762092383190,"user_tz":-330,"elapsed":4006,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"b704bba9-0419-421a-d682-018420b6046e"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Create query engine with TreeSummarize synthesis\n","โœ… Query engine with TreeSummarize created\n","\n","๐Ÿ” Testing analytical query: 'Compare the advantages and disadvantages of different AI agent frameworks'\n","๐Ÿ“ TreeSummarize Response:\n","Advantages and disadvantages of different AI agent frameworks can be compared based on factors such as complexity, learning curve, best use case, performance considerations, and suitability for different tasks. Frameworks like LangChain offer a moderate complexity level and are suitable for general LLM applications, while AutoGPT is known for its high complexity and steep learning curve, making it ideal for autonomous tasks. CrewAI, on the other hand, has a medium complexity level with an easy learning curve, making it suitable for team collaboration. LlamaIndex stands out with low complexity and ease of use, making it a good fit for document Q&A tasks. Performance considerations show that single agents typically have lower latency compared to multi-agent systems, but the latter are often more accurate for complex tasks. However, more agents in a system can lead to higher API costs. In terms of reliability, simpler frameworks are generally more stable. 
def create_query_engine_with_tree_summarize(index, top_k: int = 5):
    """Create a query engine that uses TreeSummarize for comprehensive responses.

    TreeSummarize builds the answer hierarchically from the retrieved nodes,
    which suits complex analytical queries and long-form responses.

    Args:
        index: Vector index to query.
        top_k: Number of results to retrieve.

    Returns:
        Query engine using TreeSummarize synthesis, or None on failure.
    """
    try:
        tree_synthesizer = TreeSummarize()
        query_engine = index.as_query_engine(
            response_synthesizer=tree_synthesizer,
            similarity_top_k=top_k,
        )
    except Exception as e:
        # Mirror the sibling factory's error handling so the caller's
        # `if tree_engine:` truthiness check works as intended.
        print(f"โŒ Error creating query engine: {e}")
        return None

    print("โœ… Create query engine with TreeSummarize synthesis")
    return query_engine
first\")\n"]},{"cell_type":"markdown","metadata":{"id":"abAfqxgnYchQ"},"source":["## 3. Structured Outputs with Pydantic Models\n","\n","**Concept:** Structured outputs ensure predictable, parseable responses using Pydantic models. This is essential for API endpoints and data pipelines.\n","\n","**Why it matters:** Instead of free-text responses, you get type-safe, validated data structures that applications can reliably process.\n","\n","Complete the function below to create a structured output system for extracting research paper information.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"G0AwVrGwYchQ","executionInfo":{"status":"ok","timestamp":1762095388759,"user_tz":-330,"elapsed":1762,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"eb3b067d-9dcb-407b-da11-60369ca3032f"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ…: Create structured output program with ResearchPaperInfo\n","โœ… Structured output program created\n","\n","๐Ÿ” Testing structured query: 'Tell me about AI agents and their capabilities'\n","๐Ÿ“Š Structured Response:\n","title='AI Agents and Their Capabilities' key_points=['Architectures leveraging advanced techniques are more effective across various benchmarks and problem types', 'Current AI-driven agents show promise but have notable limitations and areas for improvement', 'Challenges around agent benchmarks, real-world applicability, and mitigating harmful biases need to be addressed for reliable agents'] applications=[] summary='The survey explores the progression from static language models to dynamic, autonomous agents, providing a comprehensive understanding of the current AI agent landscape and insights for developers.'\n"]}],"source":["# First, define the Pydantic models for structured outputs\n","class ResearchPaperInfo(BaseModel):\n"," \"\"\"Structured information about a research paper or AI concept.\"\"\"\n"," 
def create_structured_output_program(output_model: type[BaseModel] = ResearchPaperInfo):
    """Create a structured output program using Pydantic models.

    Wraps the configured LLM in an LLMTextCompletionProgram whose text
    completion is parsed and validated against ``output_model``, yielding
    type-safe structured data instead of free text.

    Args:
        output_model: Pydantic model *class* (not an instance) describing the
            structure of the extracted data.

    Returns:
        LLMTextCompletionProgram that returns instances of ``output_model``.
    """
    # Parser that validates the LLM completion against the model's schema.
    output_parser = PydanticOutputParser(output_cls=output_model)

    program = LLMTextCompletionProgram.from_defaults(
        output_parser=output_parser,
        prompt_template_str=(
            "Extract structured information from the following context:\n"
            "{context}\n\n"
            "Question: {query}\n\n"
            "Provide the information in the specified JSON format."
        ),
    )

    print(f"โœ…: Create structured output program with {output_model.__name__}")

    return program
extraction (Uncomment when implemented)\n"," retriever = VectorIndexRetriever(index=index, similarity_top_k=3)\n"," nodes = retriever.retrieve(structure_query)\n"," context = \"\\n\".join([node.text for node in nodes])\n","\n","\n"," response = structured_program(context=context, query=structure_query)\n"," print(f\"๐Ÿ“Š Structured Response:\\n{response}\")\n","\n"," else:\n"," print(\"โŒ Failed to create structured output program\")\n","else:\n"," print(\"โŒ No index available - run previous cells first\")\n"]},{"cell_type":"markdown","metadata":{"id":"cuS2ueNtYchQ"},"source":["## 4. Advanced Pipeline - Combining All Techniques\n","\n","**Concept:** Combine multiple advanced techniques into a single powerful query engine: similarity filtering + response synthesis + structured output.\n","\n","**Why it matters:** Production RAG systems often need multiple techniques working together for optimal results.\n","\n","Complete the function below to create a comprehensive advanced RAG pipeline.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"zsKSzjscYchQ","executionInfo":{"status":"ok","timestamp":1762095552846,"user_tz":-330,"elapsed":2667,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"f16ed629-552a-47e5-8e66-8f0cde752139"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… : Create advanced RAG pipeline with all techniques\n","โœ… Advanced RAG pipeline created successfully!\n"," ๐Ÿ”ง Similarity filtering: โœ…\n"," ๐ŸŒณ TreeSummarize synthesis: โœ…\n","\n","๐Ÿ” Testing complex query: 'Analyze the current state and future potential of AI agent technologies'\n","๐Ÿš€ Advanced RAG Response:\n","The current state of AI agent technologies shows promising advancements in achieving complex goals that require enhanced reasoning, planning, and tool execution capabilities. 
def create_advanced_rag_pipeline(index, similarity_cutoff: float = 0.3, top_k: int = 5):
    """Create a comprehensive advanced RAG pipeline combining multiple techniques.

    Combines similarity-cutoff filtering of retrieved nodes with TreeSummarize
    response synthesis in a single query engine.

    Args:
        index: Vector index to query.
        similarity_cutoff: Minimum similarity score for filtering.
        top_k: Number of initial results to retrieve.

    Returns:
        Query engine with filtering and synthesis combined, or None on failure.
    """
    try:
        # Drop retrieved nodes scoring below the cutoff before synthesis.
        similarity_processor = SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)
        # Hierarchical synthesis for comprehensive, long-form answers.
        tree_synthesizer = TreeSummarize()

        advanced_engine = index.as_query_engine(
            response_synthesizer=tree_synthesizer,
            node_postprocessors=[similarity_processor],
            similarity_top_k=top_k,
        )
    except Exception as e:
        # Consistent with the other factory helpers: report the error and
        # return None so the caller's truthiness check handles the failure.
        print(f"โŒ Error creating query engine: {e}")
        return None

    print("โœ… : Create advanced RAG pipeline with all techniques")
    return advanced_engine
Final Test - Compare Basic vs Advanced RAG\n","\n","Once you've completed all the functions above, run this cell to compare basic RAG with your advanced techniques.\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"cOm-hv1cYchQ","executionInfo":{"status":"ok","timestamp":1762095884735,"user_tz":-330,"elapsed":10556,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"34e01e76-4134-4dcc-eba2-bb69ebb67a82"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐Ÿš€ Advanced RAG Techniques Assignment - Final Test\n","============================================================\n","\n","๐Ÿ“Š Component Status:\n"," โœ… Basic Index\n"," โœ… Similarity Filter\n"," โœ… TreeSummarize\n"," โœ… Structured Output\n"," โœ… Advanced Pipeline\n","\n","๐Ÿ” Creating basic query engine for comparison...\n","\n","============================================================\n","๐Ÿ†š COMPARISON: Basic vs Advanced RAG\n","============================================================\n","\n","๐Ÿ“‹ Test Query 1: 'What are the key capabilities of AI agents?'\n","--------------------------------------------------\n","๐Ÿ”น Basic RAG:\n"," Response: The key capabilities of AI agents include strong performance on complex tasks involving reasoning and tool execution, the ability to work iteratively towards goals, opportunities for human feedback, c...\n","\n","๐Ÿ”ธ Advanced RAG:\n"," Response: The key capabilities of AI agents include strong performance on complex tasks involving reasoning and tool execution, the ability to work iteratively towards goals, opportunities for human feedback, clear leadership, defined planning phases with opportunities for plan refinement, intelligent message filtering, and dynamic teams with agents possessing specific skills relevant to the current sub-task. 
These capabilities contribute to increased performance compared to architectures lacking these elements.\n","\n","๐Ÿ“‹ Test Query 2: 'How do you evaluate agent performance metrics?'\n","--------------------------------------------------\n","๐Ÿ”น Basic RAG:\n"," Response: By considering objective evaluation metrics like success rate, output similarity to human responses, and overall efficiency, as well as more nuanced or subjective measures such as efficiency of tool u...\n","\n","๐Ÿ”ธ Advanced RAG:\n"," Response: Evaluate agent performance metrics by considering objective evaluation metrics like success rate, output similarity to human responses, and overall efficiency. It is also important to take into account more nuanced or subjective measures of performance such as efficiency of tool use, reliability, and robustness of planning. Additionally, real-world applicability should be assessed by evaluating performance on tasks that cover a wide breadth of topics and are sourced from real conversations or issues, rather than just logic puzzles or video games.\n","\n","๐Ÿ“‹ Test Query 3: 'Explain the benefits and challenges of multimodal AI systems'\n","--------------------------------------------------\n","๐Ÿ”น Basic RAG:\n"," Response: Multimodal AI systems offer the advantage of combining multiple modes of input, such as text, images, and speech, to enhance understanding and improve performance on various tasks. By leveraging diffe...\n","\n","๐Ÿ”ธ Advanced RAG:\n"," Response: Multimodal AI systems offer the advantage of combining different modalities such as text, images, and speech to enhance understanding and performance in various tasks. By leveraging multiple modalities, these systems can provide more comprehensive and nuanced insights, leading to improved accuracy and effectiveness in tasks that require multimodal input. However, challenges may arise in multimodal AI systems related to data integration, model complexity, and computational resources. 
Coordinating information from different modalities, ensuring alignment between them, and managing the increased complexity of multimodal models can be demanding tasks. Additionally, training and deploying multimodal AI systems may require more computational resources compared to unimodal systems, potentially leading to longer processing times and higher resource consumption.\n","\n","============================================================\n","๐ŸŽฏ Assignment Status:\n"," Completed: 5/5 components\n","\n","๐ŸŽ‰ Congratulations! You've mastered Advanced RAG Techniques!\n"," โœ… Node postprocessors for result filtering\n"," โœ… Response synthesizers for better answers\n"," โœ… Structured outputs for reliable data\n"," โœ… Advanced pipelines combining all techniques\n","\n","๐Ÿš€ You're ready for production RAG systems!\n","\n","๐Ÿ’ก Key learnings:\n"," - Postprocessors improve result relevance and precision\n"," - Different synthesizers work better for different query types\n"," - Structured outputs enable reliable system integration\n"," - Advanced techniques can be combined for production systems\n"]}],"source":["# Final comparison: Basic vs Advanced RAG\n","print(\"๐Ÿš€ Advanced RAG Techniques Assignment - Final Test\")\n","print(\"=\" * 60)\n","\n","# Test queries for comparison\n","test_queries = [\n"," \"What are the key capabilities of AI agents?\",\n"," \"How do you evaluate agent performance metrics?\",\n"," \"Explain the benefits and challenges of multimodal AI systems\"\n","]\n","\n","# Check if all components were created\n","components_status = {\n"," \"Basic Index\": index is not None,\n"," \"Similarity Filter\": 'filtered_engine' in locals() and filtered_engine is not None,\n"," \"TreeSummarize\": 'tree_engine' in locals() and tree_engine is not None,\n"," \"Structured Output\": 'structured_program' in locals() and structured_program is not None,\n"," \"Advanced Pipeline\": 'advanced_pipeline' in locals() and advanced_pipeline is not 
None\n","}\n","\n","print(\"\\n๐Ÿ“Š Component Status:\")\n","for component, status in components_status.items():\n"," status_icon = \"โœ…\" if status else \"โŒ\"\n"," print(f\" {status_icon} {component}\")\n","\n","# Create basic query engine for comparison\n","if index:\n"," print(\"\\n๐Ÿ” Creating basic query engine for comparison...\")\n"," basic_engine = index.as_query_engine(similarity_top_k=5)\n","\n"," print(\"\\n\" + \"=\" * 60)\n"," print(\"๐Ÿ†š COMPARISON: Basic vs Advanced RAG\")\n"," print(\"=\" * 60)\n","\n"," for i, query in enumerate(test_queries, 1):\n"," print(f\"\\n๐Ÿ“‹ Test Query {i}: '{query}'\")\n"," print(\"-\" * 50)\n","\n"," # Basic RAG\n"," print(\"๐Ÿ”น Basic RAG:\")\n"," if basic_engine:\n"," # Uncomment when testing:\n"," basic_response = basic_engine.query(query)\n"," print(f\" Response: {str(basic_response)[:200]}...\")\n"," #print(\" (Standard vector search + simple response)\")\n","\n"," # Advanced RAG (if implemented)\n"," print(\"\\n๐Ÿ”ธ Advanced RAG:\")\n"," if components_status[\"Advanced Pipeline\"]:\n"," # Uncomment when testing:\n"," advanced_response = advanced_pipeline.query(query)\n"," print(f\" Response: {advanced_response}\")\n"," #print(\" (Filtered + TreeSummarize + Structured output)\")\n"," else:\n"," print(\" Complete the advanced pipeline function to test\")\n","\n","# Final status\n","print(\"\\n\" + \"=\" * 60)\n","print(\"๐ŸŽฏ Assignment Status:\")\n","completed_count = sum(components_status.values())\n","total_count = len(components_status)\n","\n","print(f\" Completed: {completed_count}/{total_count} components\")\n","\n","if completed_count == total_count:\n"," print(\"\\n๐ŸŽ‰ Congratulations! 
You've mastered Advanced RAG Techniques!\")\n"," print(\" โœ… Node postprocessors for result filtering\")\n"," print(\" โœ… Response synthesizers for better answers\")\n"," print(\" โœ… Structured outputs for reliable data\")\n"," print(\" โœ… Advanced pipelines combining all techniques\")\n"," print(\"\\n๐Ÿš€ You're ready for production RAG systems!\")\n","else:\n"," missing = total_count - completed_count\n"," print(f\"\\n๐Ÿ“ Complete {missing} more components to finish the assignment:\")\n"," for component, status in components_status.items():\n"," if not status:\n"," print(f\" - {component}\")\n","\n","print(\"\\n๐Ÿ’ก Key learnings:\")\n","print(\" - Postprocessors improve result relevance and precision\")\n","print(\" - Different synthesizers work better for different query types\")\n","print(\" - Structured outputs enable reliable system integration\")\n","print(\" - Advanced techniques can be combined for production systems\")\n"]}],"metadata":{"kernelspec":{"display_name":"accelerator","language":"python","name":"python3"},"language_info":{"name":"python","version":"3.11.13"},"colab":{"provenance":[]},"widgets":{"application/vnd.jupyter.widget-state+json":{"a9d4ddd2138e497f9050b59d4a579687":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_28265196138e4fc2b71a6f28b6c9ae0c","IPY_MODEL_a341f62d1b374c63b71f478afe421377","IPY_MODEL_1a852a69c3274338bd6634fb0c16a457"],"layout":"IPY_MODEL_d1a87baee2b34ff39294a9957609ae33"}},"28265196138e4fc2b71a6f28b6c9ae0c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_mo
dule_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0b6a9975c3a84f048b3b34e678c1a3d8","placeholder":"โ€‹","style":"IPY_MODEL_70441a90759e419fbc118642ee3afeb8","value":"Parsingโ€‡nodes:โ€‡100%"}},"a341f62d1b374c63b71f478afe421377":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_e80346f88fa049f9bc4690b520faeadf","max":42,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2607a80a19ef469a852a574dde7deec9","value":42}},"1a852a69c3274338bd6634fb0c16a457":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_752bdeb9460947cdb9731e87828b93c7","placeholder":"โ€‹","style":"IPY_MODEL_a9a4ffd40f97484fa39374570f6fd006","value":"โ€‡42/42โ€‡[00:00<00:00,โ€‡215.18it/s]"}},"d1a87baee2b34ff39294a9957609ae33":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":
null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0b6a9975c3a84f048b3b34e678c1a3d8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"70441a90759e419fbc118642ee3afeb8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-wi
dgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e80346f88fa049f9bc4690b520faeadf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2607a80a19ef469a852a574dde7deec9":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"752bdeb9460947cdb9731e87828b93c7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"displ
ay":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a9a4ffd40f97484fa39374570f6fd006":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"18aba1e55c084b45a59d723c920649db":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a8395d6fb02e49cb8062308a40db32b8","IPY_MODEL_4d2373e1731b46f6960ae50e48ea2d33","IPY_MODEL_cc18d8a0b8904643a0cd1f13b7ba8bd3"],"layout":"IPY_MODEL_e051a3be3c35478cbd343dbacdd48acf"}},"a8395d6fb02e49cb8062308a40db32b8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","des
cription_tooltip":null,"layout":"IPY_MODEL_a011adb7c30245deb082532acfd038cb","placeholder":"โ€‹","style":"IPY_MODEL_4208e397a3a7458aa5b16c91bc7b5ac8","value":"Generatingโ€‡embeddings:โ€‡100%"}},"4d2373e1731b46f6960ae50e48ea2d33":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_f1c05ad90ee74a1fbb77d618ff193971","max":93,"min":0,"orientation":"horizontal","style":"IPY_MODEL_656d18ae543a463ea98145be9cc6771e","value":93}},"cc18d8a0b8904643a0cd1f13b7ba8bd3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6141e0010d114134ba65680c86be8720","placeholder":"โ€‹","style":"IPY_MODEL_c86f10069df241d3adfcf0f884a61389","value":"โ€‡93/93โ€‡[00:53<00:00,โ€‡โ€‡1.82it/s]"}},"e051a3be3c35478cbd343dbacdd48acf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":n
ull,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a011adb7c30245deb082532acfd038cb":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4208e397a3a7458aa5b16c91bc7b5ac8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f1c05ad90ee74a1fbb77d618ff193971":{"model_module":"@jupyter-widgets/base","model_na
me":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"656d18ae543a463ea98145be9cc6771e":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"6141e0010d114134ba65680c86be8720":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"g
rid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c86f10069df241d3adfcf0f884a61389":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Monalisa_Samal/assignment_3a_basic_gradio_rag.ipynb b/Monalisa_Samal/assignment_3a_basic_gradio_rag.ipynb deleted file mode 100644 index 5c35784..0000000 --- a/Monalisa_Samal/assignment_3a_basic_gradio_rag.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"sFWLqj78Hkz3"},"source":["# Assignment 3a: Basic Gradio RAG Frontend\n","## Day 6 Session 2 - Building Simple RAG Applications\n","\n","In this assignment, you'll build a simple Gradio frontend for your RAG system with just the essential features:\n","- Button to initialize the vector database\n","- Search query input and button\n","- Display of AI responses\n","\n","**Learning Objectives:**\n","- Create basic Gradio interfaces\n","- Connect RAG backend to frontend\n","- Handle user interactions and database initialization\n","- Build functional AI-powered web applications\n","\n","**Prerequisites:**\n","- Completed Assignment 1 (Vector Database Basics)\n","- Completed Assignment 2 (Advanced RAG)\n","- Understanding of LlamaIndex 
fundamentals\n"]},{"cell_type":"markdown","metadata":{"id":"ZnOQlWbVHkz5"},"source":["## ๐Ÿ“š Part 1: Setup and Imports\n","\n","Import all necessary libraries for building your Gradio RAG application.\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"vvsKCOpDHxNV","executionInfo":{"status":"ok","timestamp":1762098161984,"user_tz":-330,"elapsed":13088,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"4ad37e51-53b9-45d9-d750-855729cd889d"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["# If it's in a specific folder (e.g., \"Projects/MyProject/\")\n","!pip install -r '/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt'"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"0dIMu0onUfQZ","executionInfo":{"status":"ok","timestamp":1762099657508,"user_tz":-330,"elapsed":56094,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"7d2de8f7-b3f8-4a2d-da92-2192541a3330"},"execution_count":3,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages (from -r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 5)) (0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m 
\u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m10.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m15.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already 
satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) 
(0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in 
/usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading 
llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from llama-index-llms-openrouter->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: 
more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in 
/usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages 
(from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in 
/usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading 
wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 
in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from 
weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading 
llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata 
(3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 
12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m26.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl 
(4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m92.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m67.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m104.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m22.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m16.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m96.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m26.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m19.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m4.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m12.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading 
colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=ddcea9f4d4fb0e3627f63f4273e44a3f3c30abbf190c69dfcc64f51f352d3803\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 
llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"ae862be1c9ae416ab3c67ffa196e0647"}},"metadata":{}}]},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"n9bNl5sSHkz5","executionInfo":{"status":"ok","timestamp":1762099739954,"user_tz":-330,"elapsed":46928,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"7a060eab-1636-4fd2-95ed-e1784d9bace9"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… All libraries imported successfully!\n"]}],"source":["# Import required libraries\n","import gradio as gr\n","import os\n","from pathlib import Path\n","\n","# LlamaIndex components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","print(\"โœ… All libraries imported successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"YgGcrP-0Hkz6"},"source":["## ๐Ÿค– Part 2: RAG Backend Class\n","\n","Create a simple RAG backend that can initialize the database and answer 
queries.\n"]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":386,"referenced_widgets":["29c8526118734cdfb4e39787887b3c8d","8adf73dc0ed649a5a8c5a5d9025e48ff","2fe46a51fb8144f1bda9f7a291088063","ac01b9c0a1f642feb84b714053ba82fd","5380a2c612934f608b72ab08f38606d3","f848556fe0254588a75c886a9d8f2eea","4d2cd19e968d42a5aeeca9e450d1bd8b","64ac224325924d94b411ee2848bab330","d4f769a917124dc4b807dc9175c986e7","4b350e017a5d4695905ee915f7d28e51","7dd16942c28c4f14808cdbcf859c21f2","23a2f365cc6f4f8ebc5fdacdf77b7005","294103a64b724f1bb2d040ed39e741e2","ab3a38c9dd974da787b6c5ef7ed47b0b","fbf20aad929a4055b71784bbbe47e48e","54c77334589a4ca28c74dd164fa4c811","46cee2f68aba4304af164cfd8f03a27b","46ac5f7f07f54b9cac4df267afdea68d","c6af8202f2ec44c88bdf9b7509636ad2","b87a4b50638c487abf04b62651fc1ecf","af461b7a4bca42a4bc803234b85adc38","cdee1e8d4ae645cdacd7844a35129b29","56efd96439544945bcfca3e79cbb1a29","2afb8c115dfd492aa81ea3208fc6371b","3fca0ca9cbd7429f91aca4288f4c232b","af6cb5768d1244f0861609db8181e1e0","db890f449baf471fa1730e0f49a65894","e7b24e6d82b84fe3a4ed97aab23487af","88ba09d1e813487598ff3fe92a87b672","5f3109bfe82948e9a85cf51c49d93846","f78db77bab864007b6f85443efec8bbb","36e4e1bd96f042e5ad186b304f1d716c","11ab2513be86450d8f17eff3fcac390f","80f64e3d3ea14fee938443de745a4cc2","464a7aa64455427a8b241987d40575f2","ca08abbe9fde41d9871b5ab91535ba88","50ad34e4863d4f3ba07f18185521f42c","6f3b172cb35e447f961bd0978442bf0e","3babe7706f5949afbfa3757b0bfbbaf4","25526400e1604aada1f5e6bda33a9089","c471b58ef4504e248e056fbe4a262281","dee29c448abb430e925c366228d74bed","2de1b5901612494f87b0ca0fed9b66d3","88dbdbdea2354247ad95e67a1090a190","594df45c0a094b68a268b0d5412af94d","e6986b93f60f421e94dd37ab830e0de3","3bac9dfe41604d0583bdca6f932033b8","30214e34272040679dc5fa15f720064f","0229d2ab797a4dffac04e96d6b08e699","456146dfda614fdf900fa253411f2643","b26b541ab9d3415cb742df152837a21a","7c4f7c59205741c09bb0c45a2ee079fe","5d4b6948dbbe487393d04ec5a966fbec
","bbb4be2fa3a04ce99b1e93d5c447d7d6","0025ea31940e419b828376df6b1cc37b","095b448393d44d8a87995166e71f5f95","63899673860b4061886ae8d7b5a03713","3cda42948f734557b95e86700cc19270","3e5402451c724a5a82754985a18754ce","9a1d3ffc42094c04b9ffa54b350c1804","31f349efe39d4d9ca69fb36ddc712d3f","b7f1eed940a545c09e6c496e2aae44d2","38c8fb204aa4425d8e4a51258ae164ca","36b571f73d0e41f888f9e2ac882b6675","5cd90753e88945db85ebd4047c5f1089","7ab7fb0dabd2431b8da42f29112ad550","73fed5108a93410aa40e387264255160","da14e21da45546abb098e700e5e60a33","c8f9b4a9134c47929021528fc8f64f71","0c5f353360e04cc6af1f2487fc30b3ff","34aa9d04461b4e919d07830d23f9fda1","651dcd57e8aa45fa89268841297ae6a3","526bf032890a4256aefc8d50cc7d8371","bdb8cfc7dfc242799a40586df8b91d6e","82b23de6ea3f46cd984e90b3ab9f3eec","769d61e92083433aac21e9f4428a30e3","fdfdbff8f89f49b387ddb0e4061afddf","dbb0169d244247b59d4f972e34f07d57","007c89344ae949d9b6aafc8c14bcfe88","d85e273c60b649f89d1a34cc8c6b8933","0b6ef9ec809f40f8a9ba79a0f7e13dfc","3e53ee3f24d64bc7a524ce7e78c29f17","52389e6daed54e5d854b20217b53e62d","82d09953a81741b3b6f799ce907f7739","a6759f8c292d4927b424ff16987d120e","ed6c2f99446c49eba1b026cb1dcdd991","fdae79be31cc471ca7d2a3d830b937fe","30494ca740bb48708a8a5d8f46960cee","c94a678808f649e09449d5e0c1610998","243c57bc25b34a3c93c038995dabb6c8","d35a8e85ebc24775923dcb6fc8bf3b94","6f7bbd2e653a40eb82fb35a9465d8aa9","98461d6a77d5473d94da2a21976125f2","87b2133871ef4e7cab1bde01b0a0f638","6067c0a455e74365ac64b9d6d197b415","299b4358ad4c4dff91b3714c90ee2a2b","235e44623aac4ca684a05cf615e09e7f","0060126456ba450bb5994823c19a8fe0","dee0a0429ea140d2b94c01efffe6834b","f1c8234481fe42e488d6d94fc0bc04b3","a50910a2209c462bbdbd8c6879221290","ec48f5c6933444cdad1b8acf78f4ecdf","1475e0bfd0e448cab6d9e5946baf87db","ae00ff78797446818d0a01ebf2827171","90a843cf4afb4fc5a0e393ece36cac50","deb3da53beb5450f8369e6780aeeb918","6b0b8031053a4d669fbc4d5d40b228e3","45dcadbbcc014b7398c33f65f51ed55a","1f0817d471364e16aa312e55ff02e05b","211a9a84c96e431d8e3e28cc91036e93","c4
fc0f1e4e9b4b239867d23d2b4bb700","7051d3fd92b5498597da75e856b6ccf0","ebfa88ffe54f41fc8ca89f5f65664a91","3e92ee759cd74c4a8d97e5ef5c0de5aa","14b8e6e1560d445e9634b103b408001e","8cc241c20c8e49c69f97b2b30ea07192","aaf21bb0f9a047869f2e4c409e507f56","c88f0ef122da461cbb26199c236b6d09","711140217c5b487db9bd3d0f70ffd5dd","ae8b66f993804200bc802ae72a6ff8e9","f576212311c94fffa464747883fa789b"]},"id":"xU3YJR9GHkz7","executionInfo":{"status":"ok","timestamp":1762100197424,"user_tz":-330,"elapsed":17917,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"f5f27747-4d8e-4255-99b2-db45d4c5d33a"},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:00"],"text/html":["
"]},"metadata":{}},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":4}],"source":["print(\"๐ŸŽ‰ Launching your Basic RAG Assistant...\")\n","print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n","print(\"\")\n","print(\"๐Ÿ“‹ Testing Instructions:\")\n","print(\"1. Click 'Initialize Database' button first\")\n","print(\"2. Wait for success message\")\n","print(\"3. Enter a question in the query box\")\n","print(\"4. Click 'Ask Question' to get AI response\")\n","print(\"\")\n","print(\"๐Ÿ’ก Example questions to try:\")\n","print(\"- What are the main topics in the documents?\")\n","print(\"- Summarize the key findings\")\n","print(\"- Explain the methodology used\")\n","print(\"\")\n","print(\"๐Ÿš€ Launch your app:\")\n","\n","# Your launch code here:\n","# Uncomment when implemented\n","basic_interface.launch(share=True)"]},{"cell_type":"markdown","metadata":{"id":"v51KJpIHHkz9"},"source":["## โœ… Assignment Completion Checklist\n","\n","Before submitting, ensure you have:\n","\n","- [x] RAG backend is provided and working\n","- [ ] Created Gradio interface with required components:\n"," - [ ] Title and description using gr.Markdown()\n"," - [ ] Initialize database button using gr.Button()\n"," - [ ] Status output using gr.Textbox()\n"," - [ ] Query input field using gr.Textbox()\n"," - [ ] Submit query button using gr.Button()\n"," - [ ] Response output area using gr.Textbox()\n","- [ ] Connected buttons to backend functions using .click()\n","- [ ] Successfully launched the application\n","- [ ] Tested the full workflow (initialize โ†’ query โ†’ response)\n","\n","## ๐ŸŽŠ Congratulations!\n","\n","You've successfully built your first Gradio RAG application! 
You now have:\n","\n","- A functional web interface for your RAG system\n","- Understanding of Gradio basics and component connections\n","- A foundation for building more complex AI applications\n","\n","**Next Steps**: Complete Assignment 3b to add advanced configuration options to your RAG interface!\n"]}],"metadata":{"kernelspec":{"display_name":"accelerator","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"colab":{"provenance":[]},"widgets":{"application/vnd.jupyter.widget-state+json":{"29c8526118734cdfb4e39787887b3c8d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_8adf73dc0ed649a5a8c5a5d9025e48ff","IPY_MODEL_2fe46a51fb8144f1bda9f7a291088063","IPY_MODEL_ac01b9c0a1f642feb84b714053ba82fd"],"layout":"IPY_MODEL_5380a2c612934f608b72ab08f38606d3"}},"8adf73dc0ed649a5a8c5a5d9025e48ff":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f848556fe0254588a75c886a9d8f2eea","placeholder":"โ€‹","style":"IPY_MODEL_4d2cd19e968d42a5aeeca9e450d1bd8b","value":"modules.json:โ€‡100%"}},"2fe46a51fb8144f1bda9f7a291088063":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","mode
l_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_64ac224325924d94b411ee2848bab330","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_d4f769a917124dc4b807dc9175c986e7","value":349}},"ac01b9c0a1f642feb84b714053ba82fd":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4b350e017a5d4695905ee915f7d28e51","placeholder":"โ€‹","style":"IPY_MODEL_7dd16942c28c4f14808cdbcf859c21f2","value":"โ€‡349/349โ€‡[00:00<00:00,โ€‡23.1kB/s]"}},"5380a2c612934f608b72ab08f38606d3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"o
bject_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f848556fe0254588a75c886a9d8f2eea":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4d2cd19e968d42a5aeeca9e450d1bd8b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"64ac224325924d94b411ee2848bab330":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":nul
l,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d4f769a917124dc4b807dc9175c986e7":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"4b350e017a5d4695905ee915f7d28e51":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overfl
ow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7dd16942c28c4f14808cdbcf859c21f2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"23a2f365cc6f4f8ebc5fdacdf77b7005":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_294103a64b724f1bb2d040ed39e741e2","IPY_MODEL_ab3a38c9dd974da787b6c5ef7ed47b0b","IPY_MODEL_fbf20aad929a4055b71784bbbe47e48e"],"layout":"IPY_MODEL_54c77334589a4ca28c74dd164fa4c811"}},"294103a64b724f1bb2d040ed39e741e2":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_46cee2f68aba4304af164cfd8f03a27b","placeholder":"โ€‹","style":"IPY_MODEL_46ac5f7f07f54b9cac4df267afdea68d","value":"config_sentence_transformers.json:โ€‡100%"}},"ab3a38c9dd974da787b6c5ef7ed47b0b":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_
view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_c6af8202f2ec44c88bdf9b7509636ad2","max":124,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b87a4b50638c487abf04b62651fc1ecf","value":124}},"fbf20aad929a4055b71784bbbe47e48e":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_af461b7a4bca42a4bc803234b85adc38","placeholder":"โ€‹","style":"IPY_MODEL_cdee1e8d4ae645cdacd7844a35129b29","value":"โ€‡124/124โ€‡[00:00<00:00,โ€‡5.79kB/s]"}},"54c77334589a4ca28c74dd164fa4c811":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"46cee2f
68aba4304af164cfd8f03a27b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"46ac5f7f07f54b9cac4df267afdea68d":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c6af8202f2ec44c88bdf9b7509636ad2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto
_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b87a4b50638c487abf04b62651fc1ecf":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"af461b7a4bca42a4bc803234b85adc38":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cdee1e8d4ae645cdacd7844a35129b29":{"model_module":"@jupyter-widg
ets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"56efd96439544945bcfca3e79cbb1a29":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_2afb8c115dfd492aa81ea3208fc6371b","IPY_MODEL_3fca0ca9cbd7429f91aca4288f4c232b","IPY_MODEL_af6cb5768d1244f0861609db8181e1e0"],"layout":"IPY_MODEL_db890f449baf471fa1730e0f49a65894"}},"2afb8c115dfd492aa81ea3208fc6371b":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e7b24e6d82b84fe3a4ed97aab23487af","placeholder":"โ€‹","style":"IPY_MODEL_88ba09d1e813487598ff3fe92a87b672","value":"README.md:โ€‡"}},"3fca0ca9cbd7429f91aca4288f4c232b":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":
"IPY_MODEL_5f3109bfe82948e9a85cf51c49d93846","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_f78db77bab864007b6f85443efec8bbb","value":1}},"af6cb5768d1244f0861609db8181e1e0":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_36e4e1bd96f042e5ad186b304f1d716c","placeholder":"โ€‹","style":"IPY_MODEL_11ab2513be86450d8f17eff3fcac390f","value":"โ€‡94.8k/?โ€‡[00:00<00:00,โ€‡5.95MB/s]"}},"db890f449baf471fa1730e0f49a65894":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e7b24e6d82b84fe3a4ed97aab23487af":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":
"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"88ba09d1e813487598ff3fe92a87b672":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5f3109bfe82948e9a85cf51c49d93846":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":
null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"f78db77bab864007b6f85443efec8bbb":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"36e4e1bd96f042e5ad186b304f1d716c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"11ab2513be86450d8f17eff3fcac390f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionSty
leModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"80f64e3d3ea14fee938443de745a4cc2":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_464a7aa64455427a8b241987d40575f2","IPY_MODEL_ca08abbe9fde41d9871b5ab91535ba88","IPY_MODEL_50ad34e4863d4f3ba07f18185521f42c"],"layout":"IPY_MODEL_6f3b172cb35e447f961bd0978442bf0e"}},"464a7aa64455427a8b241987d40575f2":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3babe7706f5949afbfa3757b0bfbbaf4","placeholder":"โ€‹","style":"IPY_MODEL_25526400e1604aada1f5e6bda33a9089","value":"sentence_bert_config.json:โ€‡100%"}},"ca08abbe9fde41d9871b5ab91535ba88":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_c471b58ef4504e248e056fbe4a262281","max":52,"min":0,"orientation":"horizontal","style":"IPY_MODEL_dee29c448abb430e925c366228d74bed","value":52}},"50ad34e4863d4f3ba07f
18185521f42c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2de1b5901612494f87b0ca0fed9b66d3","placeholder":"โ€‹","style":"IPY_MODEL_88dbdbdea2354247ad95e67a1090a190","value":"โ€‡52.0/52.0โ€‡[00:00<00:00,โ€‡5.00kB/s]"}},"6f3b172cb35e447f961bd0978442bf0e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3babe7706f5949afbfa3757b0bfbbaf4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,
"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"25526400e1604aada1f5e6bda33a9089":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c471b58ef4504e248e056fbe4a262281":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"
overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dee29c448abb430e925c366228d74bed":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2de1b5901612494f87b0ca0fed9b66d3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"88dbdbdea2354247ad95e67a1090a190":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"594df45c0a094b68a268b0d54
12af94d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_e6986b93f60f421e94dd37ab830e0de3","IPY_MODEL_3bac9dfe41604d0583bdca6f932033b8","IPY_MODEL_30214e34272040679dc5fa15f720064f"],"layout":"IPY_MODEL_0229d2ab797a4dffac04e96d6b08e699"}},"e6986b93f60f421e94dd37ab830e0de3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_456146dfda614fdf900fa253411f2643","placeholder":"โ€‹","style":"IPY_MODEL_b26b541ab9d3415cb742df152837a21a","value":"config.json:โ€‡100%"}},"3bac9dfe41604d0583bdca6f932033b8":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_7c4f7c59205741c09bb0c45a2ee079fe","max":743,"min":0,"orientation":"horizontal","style":"IPY_MODEL_5d4b6948dbbe487393d04ec5a966fbec","value":743}},"30214e34272040679dc5fa15f720064f":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","
_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bbb4be2fa3a04ce99b1e93d5c447d7d6","placeholder":"โ€‹","style":"IPY_MODEL_0025ea31940e419b828376df6b1cc37b","value":"โ€‡743/743โ€‡[00:00<00:00,โ€‡71.6kB/s]"}},"0229d2ab797a4dffac04e96d6b08e699":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"456146dfda614fdf900fa253411f2643":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows
":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b26b541ab9d3415cb742df152837a21a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"7c4f7c59205741c09bb0c45a2ee079fe":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5d4b6948dbbe487393d04ec5a966fbec":{"model_module":"@jupyter-widgets/controls","m
odel_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"bbb4be2fa3a04ce99b1e93d5c447d7d6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0025ea31940e419b828376df6b1cc37b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"095b448393d44d8a87995166e71f5f95":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_
module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_63899673860b4061886ae8d7b5a03713","IPY_MODEL_3cda42948f734557b95e86700cc19270","IPY_MODEL_3e5402451c724a5a82754985a18754ce"],"layout":"IPY_MODEL_9a1d3ffc42094c04b9ffa54b350c1804"}},"63899673860b4061886ae8d7b5a03713":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_31f349efe39d4d9ca69fb36ddc712d3f","placeholder":"โ€‹","style":"IPY_MODEL_b7f1eed940a545c09e6c496e2aae44d2","value":"model.safetensors:โ€‡100%"}},"3cda42948f734557b95e86700cc19270":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_38c8fb204aa4425d8e4a51258ae164ca","max":133466304,"min":0,"orientation":"horizontal","style":"IPY_MODEL_36b571f73d0e41f888f9e2ac882b6675","value":133466304}},"3e5402451c724a5a82754985a18754ce":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLVi
ew","description":"","description_tooltip":null,"layout":"IPY_MODEL_5cd90753e88945db85ebd4047c5f1089","placeholder":"โ€‹","style":"IPY_MODEL_7ab7fb0dabd2431b8da42f29112ad550","value":"โ€‡133M/133Mโ€‡[00:02<00:00,โ€‡107MB/s]"}},"9a1d3ffc42094c04b9ffa54b350c1804":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"31f349efe39d4d9ca69fb36ddc712d3f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_conten
t":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b7f1eed940a545c09e6c496e2aae44d2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"38c8fb204aa4425d8e4a51258ae164ca":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"36b571f73d0e41f888f9e2ac882b6675":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"Progre
ssStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5cd90753e88945db85ebd4047c5f1089":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7ab7fb0dabd2431b8da42f29112ad550":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"73fed5108a93410aa40e387264255160":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","b
ox_style":"","children":["IPY_MODEL_da14e21da45546abb098e700e5e60a33","IPY_MODEL_c8f9b4a9134c47929021528fc8f64f71","IPY_MODEL_0c5f353360e04cc6af1f2487fc30b3ff"],"layout":"IPY_MODEL_34aa9d04461b4e919d07830d23f9fda1"}},"da14e21da45546abb098e700e5e60a33":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_651dcd57e8aa45fa89268841297ae6a3","placeholder":"โ€‹","style":"IPY_MODEL_526bf032890a4256aefc8d50cc7d8371","value":"tokenizer_config.json:โ€‡100%"}},"c8f9b4a9134c47929021528fc8f64f71":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_bdb8cfc7dfc242799a40586df8b91d6e","max":366,"min":0,"orientation":"horizontal","style":"IPY_MODEL_82b23de6ea3f46cd984e90b3ab9f3eec","value":366}},"0c5f353360e04cc6af1f2487fc30b3ff":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_769d61e92083433aac21e9f4428a30e3","placeholder":"โ€‹","style":"IPY_MODEL_fdfdbff8f89f49b387ddb0e4061afddf","v
alue":"โ€‡366/366โ€‡[00:00<00:00,โ€‡27.0kB/s]"}},"34aa9d04461b4e919d07830d23f9fda1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"651dcd57e8aa45fa89268841297ae6a3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null
,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"526bf032890a4256aefc8d50cc7d8371":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bdb8cfc7dfc242799a40586df8b91d6e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"82b23de6ea3f46cd984e90b3ab9f3eec":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"769d61e
92083433aac21e9f4428a30e3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fdfdbff8f89f49b387ddb0e4061afddf":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dbb0169d244247b59d4f972e34f07d57":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_007c89344ae949d9b6aafc8c14bcfe88","IPY_MODEL_d85e273c60b649f89d1a34cc8c6b8933","IPY_MODEL_0b6ef9ec809f40f8a9ba79a0f7e13dfc"],"layout":"IPY_MOD
EL_3e53ee3f24d64bc7a524ce7e78c29f17"}},"007c89344ae949d9b6aafc8c14bcfe88":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_52389e6daed54e5d854b20217b53e62d","placeholder":"โ€‹","style":"IPY_MODEL_82d09953a81741b3b6f799ce907f7739","value":"vocab.txt:โ€‡"}},"d85e273c60b649f89d1a34cc8c6b8933":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_a6759f8c292d4927b424ff16987d120e","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_ed6c2f99446c49eba1b026cb1dcdd991","value":1}},"0b6ef9ec809f40f8a9ba79a0f7e13dfc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_fdae79be31cc471ca7d2a3d830b937fe","placeholder":"โ€‹","style":"IPY_MODEL_30494ca740bb48708a8a5d8f46960cee","value":"โ€‡232k/?โ€‡[00:00<00:00,โ€‡5.61MB/s]"}},"3e53ee3f24d64bc7a524ce7e78c29f17":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_
module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"52389e6daed54e5d854b20217b53e62d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"82d09953a81741b3b6f799ce907f7739":{"model_module":"@jupyter-widgets/contr
ols","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a6759f8c292d4927b424ff16987d120e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"ed6c2f99446c49eba1b026cb1dcdd991":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"fdae79be31cc471ca7d2a3d830b937fe":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version"
:"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"30494ca740bb48708a8a5d8f46960cee":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c94a678808f649e09449d5e0c1610998":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_243c57bc25b34a3c93c038995dabb6c8","IPY_MODEL_d35a8e85ebc24775923dcb6fc8bf3b94","IPY_MODEL_6f7bbd2e653a40eb82fb35a9465d8aa9"],"layout":"IPY_MODEL_98461d6a77d5473d94da2a21976125f2"}},"243c57bc25b34a3c93c038995dabb6c8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes"
:[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_87b2133871ef4e7cab1bde01b0a0f638","placeholder":"โ€‹","style":"IPY_MODEL_6067c0a455e74365ac64b9d6d197b415","value":"tokenizer.json:โ€‡"}},"d35a8e85ebc24775923dcb6fc8bf3b94":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_299b4358ad4c4dff91b3714c90ee2a2b","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_235e44623aac4ca684a05cf615e09e7f","value":1}},"6f7bbd2e653a40eb82fb35a9465d8aa9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0060126456ba450bb5994823c19a8fe0","placeholder":"โ€‹","style":"IPY_MODEL_dee0a0429ea140d2b94c01efffe6834b","value":"โ€‡711k/?โ€‡[00:00<00:00,โ€‡10.1MB/s]"}},"98461d6a77d5473d94da2a21976125f2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name
":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"87b2133871ef4e7cab1bde01b0a0f638":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6067c0a455e74365ac64b9d6d197b415":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleMod
el","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"299b4358ad4c4dff91b3714c90ee2a2b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"235e44623aac4ca684a05cf615e09e7f":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"0060126456ba450bb5994823c19a8fe0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,
"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dee0a0429ea140d2b94c01efffe6834b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f1c8234481fe42e488d6d94fc0bc04b3":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a50910a2209c462bbdbd8c6879221290","IPY_MODEL_ec48f5c6933444cdad1b8acf78f4ecdf","IPY_MODEL_1475e0bfd0e448cab6d9e5946baf87db"],"layout":"IPY_MODEL_ae00ff78797446818d0a01ebf2827171"}},"a50910a2209c462bbdbd8c6879221290":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"
1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_90a843cf4afb4fc5a0e393ece36cac50","placeholder":"โ€‹","style":"IPY_MODEL_deb3da53beb5450f8369e6780aeeb918","value":"special_tokens_map.json:โ€‡100%"}},"ec48f5c6933444cdad1b8acf78f4ecdf":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_6b0b8031053a4d669fbc4d5d40b228e3","max":125,"min":0,"orientation":"horizontal","style":"IPY_MODEL_45dcadbbcc014b7398c33f65f51ed55a","value":125}},"1475e0bfd0e448cab6d9e5946baf87db":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1f0817d471364e16aa312e55ff02e05b","placeholder":"โ€‹","style":"IPY_MODEL_211a9a84c96e431d8e3e28cc91036e93","value":"โ€‡125/125โ€‡[00:00<00:00,โ€‡8.50kB/s]"}},"ae00ff78797446818d0a01ebf2827171":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_c
olumns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"90a843cf4afb4fc5a0e393ece36cac50":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"deb3da53beb5450f8369e6780aeeb918":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6b0b8031053a4d669fbc4d5d40b228
e3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"45dcadbbcc014b7398c33f65f51ed55a":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"1f0817d471364e16aa312e55ff02e05b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,
"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"211a9a84c96e431d8e3e28cc91036e93":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c4fc0f1e4e9b4b239867d23d2b4bb700":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_7051d3fd92b5498597da75e856b6ccf0","IPY_MODEL_ebfa88ffe54f41fc8ca89f5f65664a91","IPY_MODEL_3e92ee759cd74c4a8d97e5ef5c0de5aa"],"layout":"IPY_MODEL_14b8e6e1560d445e9634b103b408001e"}},"7051d3fd92b5498597da75e856b6ccf0":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8cc241c20c8e49c69f97b2b30ea07192","placeholder":"โ€‹","style":"IPY_MODEL_aaf21bb0
f9a047869f2e4c409e507f56","value":"config.json:โ€‡100%"}},"ebfa88ffe54f41fc8ca89f5f65664a91":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_c88f0ef122da461cbb26199c236b6d09","max":190,"min":0,"orientation":"horizontal","style":"IPY_MODEL_711140217c5b487db9bd3d0f70ffd5dd","value":190}},"3e92ee759cd74c4a8d97e5ef5c0de5aa":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ae8b66f993804200bc802ae72a6ff8e9","placeholder":"โ€‹","style":"IPY_MODEL_f576212311c94fffa464747883fa789b","value":"โ€‡190/190โ€‡[00:00<00:00,โ€‡7.69kB/s]"}},"14b8e6e1560d445e9634b103b408001e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":nul
l,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8cc241c20c8e49c69f97b2b30ea07192":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"aaf21bb0f9a047869f2e4c409e507f56":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c88f0ef122da461cbb26199c236b6d09":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_mod
el_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"711140217c5b487db9bd3d0f70ffd5dd":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ae8b66f993804200bc802ae72a6ff8e9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":nu
ll,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f576212311c94fffa464747883fa789b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Monalisa_Samal/assignment_3b_advanced_gradio_rag.ipynb b/Monalisa_Samal/assignment_3b_advanced_gradio_rag.ipynb deleted file mode 100644 index 9ca3fec..0000000 --- a/Monalisa_Samal/assignment_3b_advanced_gradio_rag.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"pKDowy-WZFvi"},"source":["# Assignment 3b: Advanced Gradio RAG Frontend\n","## Day 6 Session 2 - Building Configurable RAG Applications\n","\n","In this assignment, you'll extend your basic RAG interface with advanced configuration options to create a professional, feature-rich RAG application.\n","\n","**New Features to Add:**\n","- Model selection dropdown (gpt-4o, gpt-4o-mini)\n","- Temperature slider (0 to 1 with 0.1 intervals)\n","- Chunk size configuration\n","- Chunk overlap configuration \n","- Similarity top-k slider\n","- Node postprocessor multiselect\n","- Similarity cutoff slider\n","- Response synthesizer multiselect\n","\n","**Learning Objectives:**\n","- Advanced Gradio components and interactions\n","- Dynamic RAG configuration\n","- Professional UI design patterns\n","- Parameter validation and handling\n","- Building production-ready AI applications\n","\n","**Prerequisites:**\n","- Completed 
Assignment 3a (Basic Gradio RAG)\n","- Understanding of RAG parameters and their effects\n"]},{"cell_type":"markdown","metadata":{"id":"EUgCpBomZFvl"},"source":["## ๐Ÿ“š Part 1: Setup and Imports\n","\n","Import all necessary libraries including advanced RAG components for configuration options.\n","\n","**Note:** This assignment uses OpenRouter for LLM access (not OpenAI). Make sure you have your `OPENROUTER_API_KEY` environment variable set.\n"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"9JfGHoNeZKs9","executionInfo":{"status":"ok","timestamp":1762100857948,"user_tz":-330,"elapsed":22653,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"3680aa8b-e060-43a3-f8c7-0f948706d9b8"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["# If it's in a specific folder (e.g., \"Projects/MyProject/\")\n","!pip install -r '/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt'"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"cpEwIq29ZWU4","executionInfo":{"status":"ok","timestamp":1762100923592,"user_tz":-330,"elapsed":46970,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"c810f687-58d5-42d0-adb8-9808b40148cc"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.13.5)\n","Requirement already satisfied: google-api-core in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.28.0)\n","Requirement already satisfied: google-api-python-client in 
/usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (2.185.0)\n","Requirement already satisfied: google-auth in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (2.38.0)\n","Requirement already satisfied: google-auth-httplib2 in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 5)) (0.2.0)\n","Requirement already satisfied: gradio in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (5.49.1)\n","Requirement already satisfied: gradio_client in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (1.13.3)\n","Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (0.36.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.17.1)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (7.34.0)\n","Collecting lancedb (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.8 kB)\n","Collecting llama-index (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index-0.14.7-py3-none-any.whl.metadata (13 kB)\n","Collecting llama-index-vector-stores-lancedb (from -r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl.metadata (460 bytes)\n","Collecting llama-index-embeddings-huggingface (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14))\n"," Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl.metadata (458 bytes)\n","Collecting llama-index-llms-huggingface-api (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 15))\n"," Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-index-embeddings-openai (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 16))\n"," Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl.metadata (400 bytes)\n","Collecting llama-index-llms-openrouter (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl.metadata (2.3 kB)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (3.9.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 19)) (2.0.2)\n","Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.2.2)\n","Requirement already satisfied: openai in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.109.1)\n","Collecting openai-whisper (from -r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22))\n"," Downloading openai_whisper-20250625.tar.gz (803 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m803.2/803.2 kB\u001b[0m \u001b[31m17.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n"," Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n"," Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: pydantic in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.11.10)\n","Requirement already satisfied: sentence-transformers in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (5.1.2)\n","Collecting yt-dlp (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 25))\n"," Downloading yt_dlp-2025.10.22-py3-none-any.whl.metadata (176 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m176.0/176.0 kB\u001b[0m \u001b[31m13.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: spacy in /usr/local/lib/python3.12/dist-packages (from -r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.8.7)\n","Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (2.8)\n","Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 1)) (4.15.0)\n","Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.71.0)\n","Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (5.29.5)\n","Requirement already satisfied: proto-plus<2.0.0,>=1.22.3 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (1.26.1)\n","Requirement already satisfied: requests<3.0.0,>=2.18.0 in /usr/local/lib/python3.12/dist-packages (from google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.32.4)\n","Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (0.31.0)\n","Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.12/dist-packages (from google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (4.2.0)\n","Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (5.5.2)\n","Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) 
(0.4.2)\n","Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.12/dist-packages (from google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (4.9.1)\n","Requirement already satisfied: aiofiles<25.0,>=22.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (24.1.0)\n","Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.11.0)\n","Requirement already satisfied: brotli>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.1.0)\n","Requirement already satisfied: fastapi<1.0,>=0.115.2 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.120.1)\n","Requirement already satisfied: ffmpy in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.6.4)\n","Requirement already satisfied: groovy~=0.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Requirement already satisfied: httpx<1.0,>=0.24.1 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.28.1)\n","Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.1.6)\n","Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.0.3)\n","Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11.4)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (25.0)\n","Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (11.3.0)\n","Requirement already satisfied: pydub in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.25.1)\n","Requirement already satisfied: python-multipart>=0.0.18 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.20)\n","Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (6.0.3)\n","Requirement already satisfied: ruff>=0.9.3 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.14.2)\n","Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.7)\n","Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2.10.0)\n","Requirement already 
satisfied: starlette<1.0,>=0.40.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.49.1)\n","Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.13.3)\n","Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.20.0)\n","Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.12/dist-packages (from gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.38.0)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (2025.3.0)\n","Requirement already satisfied: websockets<16.0,>=13.0 in /usr/local/lib/python3.12/dist-packages (from gradio_client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 7)) (15.0.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (3.20.0)\n","Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (4.67.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 8)) (1.2.0)\n","Requirement already satisfied: debugpy>=1.0 in /usr/local/lib/python3.12/dist-packages 
(from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.8.15)\n","Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (7.4.9)\n","Requirement already satisfied: matplotlib-inline>=0.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.2.1)\n","Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (1.6.0)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.5)\n","Requirement already satisfied: pyzmq>=17 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (26.2.1)\n","Requirement already satisfied: tornado>=6.1 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (6.5.1)\n","Requirement already satisfied: traitlets>=5.1.0 in /usr/local/lib/python3.12/dist-packages (from ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.7.1)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (75.2.0)\n","Collecting jedi>=0.16 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 
kB)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (3.0.52)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (2.19.2)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.0)\n","Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.12/dist-packages (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (4.9.0)\n","Collecting deprecation (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading deprecation-2.1.0-py2.py3-none-any.whl.metadata (4.6 kB)\n","Requirement already satisfied: pyarrow>=16 in /usr/local/lib/python3.12/dist-packages (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11)) (18.1.0)\n","Collecting lance-namespace>=0.0.16 (from lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace-0.0.20-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-cli<0.6,>=0.5.0 (from llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_cli-0.5.3-py3-none-any.whl.metadata (1.4 kB)\n","Collecting llama-index-core<0.15.0,>=0.14.7 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_core-0.14.7-py3-none-any.whl.metadata (2.5 kB)\n","Collecting llama-index-indices-managed-llama-cloud>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-index-llms-openai<0.7,>=0.6.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_llms_openai-0.6.6-py3-none-any.whl.metadata (3.0 kB)\n","Collecting llama-index-readers-file<0.6,>=0.5.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_file-0.5.4-py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-index-readers-llama-parse>=0.4.0 (from llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl.metadata (3.1 kB)\n","Collecting pylance (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (2.1 kB)\n","Collecting tantivy (from llama-index-vector-stores-lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 13))\n"," Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.4 kB)\n","Collecting llama-index-llms-openai-like<0.6,>=0.5.0 (from 
llama-index-llms-openrouter->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 17))\n"," Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl.metadata (1.1 kB)\n","Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (8.3.0)\n","Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (1.5.2)\n","Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 18)) (2024.11.6)\n","Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2.9.0.post0)\n","Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (2025.2)\n","Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.9.0)\n","Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from openai->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (0.11.1)\n","Requirement already satisfied: sniffio in /usr/local/lib/python3.12/dist-packages (from openai->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 21)) (1.3.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.8.0)\n","Requirement already satisfied: numba in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.60.0)\n","Requirement already satisfied: tiktoken in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.12.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (2.8.0+cu126)\n","Requirement already satisfied: triton>=2 in /usr/local/lib/python3.12/dist-packages (from openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (3.4.0)\n","Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.7.0)\n","Requirement already satisfied: pydantic-core==2.33.2 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (2.33.2)\n","Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.12/dist-packages (from pydantic->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 23)) (0.4.2)\n","Requirement already satisfied: transformers<5.0.0,>=4.41.0 in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (4.57.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.6.1)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (from sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (1.16.3)\n","Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.12)\n","Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.5)\n","Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.0.13)\n","Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.11)\n","Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.0.10)\n","Requirement already satisfied: thinc<8.4.0,>=8.3.4 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (8.3.6)\n","Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /usr/local/lib/python3.12/dist-packages (from spacy->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.1.3)\n","Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.5.1)\n","Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (2.0.10)\n","Requirement already satisfied: weasel<0.5.0,>=0.1.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.4.1)\n","Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /usr/local/lib/python3.12/dist-packages (from spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (3.5.0)\n","Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.12/dist-packages (from anyio<5.0,>=3.0->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (3.11)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from fastapi<1.0,>=0.115.2->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.0.3)\n","Requirement already satisfied: pyparsing<4,>=3.0.4 in /usr/local/lib/python3.12/dist-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 3)) (3.2.5)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (2025.10.5)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from 
httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.0.9)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0,>=0.24.1->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.16.0)\n","Requirement already satisfied: aiohttp in /usr/local/lib/python3.12/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (3.13.1)\n","Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.12/dist-packages (from jedi>=0.16->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.8.5)\n","Requirement already satisfied: entrypoints in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (0.4)\n","Requirement already satisfied: jupyter-core>=4.9.2 in /usr/local/lib/python3.12/dist-packages (from jupyter-client>=6.1.12->ipykernel->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 9)) (5.9.1)\n","Collecting lance-namespace-urllib3-client (from lance-namespace>=0.0.16->lancedb->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 11))\n"," Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl.metadata (22 kB)\n","Requirement already satisfied: language-data>=1.2 in /usr/local/lib/python3.12/dist-packages (from langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Collecting aiosqlite (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 
12))\n"," Downloading aiosqlite-0.21.0-py3-none-any.whl.metadata (4.3 kB)\n","Collecting banks<3,>=2.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading banks-2.2.0-py3-none-any.whl.metadata (12 kB)\n","Collecting dataclasses-json (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dataclasses_json-0.6.7-py3-none-any.whl.metadata (25 kB)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n","Collecting dirtyjson<2,>=1.0.8 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading dirtyjson-1.0.8-py3-none-any.whl.metadata (11 kB)\n","Collecting filetype<2,>=1.2.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading filetype-1.2.0-py2.py3-none-any.whl.metadata (6.5 kB)\n","Collecting llama-index-workflows!=2.9.0,<3,>=2 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_workflows-2.10.2-py3-none-any.whl.metadata (6.5 kB)\n","Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.5)\n","Requirement already satisfied: platformdirs in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (4.5.0)\n","Collecting setuptools>=18.5 (from ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10))\n"," Using cached setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: sqlalchemy>=1.4.49 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.44)\n","Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.2.0 in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (8.5.0)\n","Collecting typing-inspect>=0.8.0 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading typing_inspect-0.9.0-py3-none-any.whl.metadata (1.5 kB)\n","Requirement already satisfied: wrapt in /usr/local/lib/python3.12/dist-packages (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (2.0.0)\n","Collecting deprecated>=1.2.9.3 (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading Deprecated-1.2.18-py2.py3-none-any.whl.metadata (5.7 kB)\n","Collecting llama-cloud==0.1.35 (from llama-index-indices-managed-llama-cloud>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud-0.1.35-py3-none-any.whl.metadata (1.2 kB)\n","Collecting wrapt (from llama-index-core<0.15.0,>=0.14.7->llama-index->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (6.4 kB)\n","Requirement already satisfied: defusedxml>=0.7.1 in /usr/local/lib/python3.12/dist-packages (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (0.7.1)\n","Collecting pypdf<7,>=5.1.0 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading pypdf-6.1.3-py3-none-any.whl.metadata (7.1 kB)\n","Collecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.6,>=0.5.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading striprtf-0.0.26-py3-none-any.whl.metadata (2.1 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.77-py3-none-any.whl.metadata (6.6 kB)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.12/dist-packages (from pexpect>4.3->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.12/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 10)) (0.2.14)\n","Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.12/dist-packages (from pyasn1-modules>=0.2.1->google-auth->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 4)) (0.6.1)\n","Requirement already satisfied: six>=1.5 in 
/usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 20)) (1.17.0)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (3.4.4)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests<3.0.0,>=2.18.0->google-api-core->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 2)) (2.5.0)\n","Requirement already satisfied: blis<1.4.0,>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.0)\n","Requirement already satisfied: confection<1.0.0,>=0.0.1 in /usr/local/lib/python3.12/dist-packages (from thinc<8.4.0,>=8.3.4->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.1.5)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.13.3)\n","Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r 
/content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.80)\n","Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (9.10.2.21)\n","Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.4.1)\n","Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.3.0.4)\n","Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (10.3.7.77)\n","Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (11.7.1.2)\n","Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.5.4.2)\n","Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.7.1)\n","Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt 
(line 22)) (2.27.3)\n","Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.77)\n","Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (12.6.85)\n","Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.11.1.6)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.22.1)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.12/dist-packages (from transformers<5.0.0,>=4.41.0->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (0.6.2)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (1.5.4)\n","Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.12/dist-packages (from typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (13.9.4)\n","Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (0.23.0)\n","Requirement already satisfied: 
smart-open<8.0.0,>=5.2.1 in /usr/local/lib/python3.12/dist-packages (from weasel<0.5.0,>=0.1.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (7.4.1)\n","Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.12/dist-packages (from numba->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (0.43.0)\n","Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.12/dist-packages (from scikit-learn->sentence-transformers->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 24)) (3.6.0)\n","Requirement already satisfied: aiohappyeyeballs>=2.5.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (2.6.1)\n","Requirement already satisfied: aiosignal>=1.4.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.4.0)\n","Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (25.4.0)\n","Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.8.0)\n","Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.12/dist-packages (from 
aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (6.7.0)\n","Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (0.4.1)\n","Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 14)) (1.22.0)\n","Collecting griffe (from banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading griffe-1.14.0-py3-none-any.whl.metadata (5.1 kB)\n","Requirement already satisfied: marisa-trie>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 26)) (1.3.1)\n","Collecting llama-index-instrumentation>=0.1.0 (from llama-index-workflows!=2.9.0,<3,>=2->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_index_instrumentation-0.4.2-py3-none-any.whl.metadata (1.1 kB)\n","Collecting llama-cloud-services>=0.6.77 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.77-py3-none-any.whl.metadata (3.3 kB)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from 
rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (4.0.0)\n","Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.12/dist-packages (from sqlalchemy>=1.4.49->sqlalchemy[asyncio]>=1.4.49->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (3.2.4)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->openai-whisper->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 22)) (1.3.0)\n","Collecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading mypy_extensions-1.1.0-py3-none-any.whl.metadata (1.1 kB)\n","Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading marshmallow-3.26.1-py3-none-any.whl.metadata (7.3 kB)\n","INFO: pip is looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.76-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.76 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.76-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.75-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.75 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.75-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.74-py3-none-any.whl.metadata (6.6 kB)\n","INFO: pip is still looking at multiple versions of llama-cloud-services to determine which version is compatible with other requirements. 
This could take a while.\n","Collecting llama-cloud-services>=0.6.74 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.74-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.73-py3-none-any.whl.metadata (6.6 kB)\n","INFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. See https://pip.pypa.io/warnings/backtracking for guidance. If you want to abort this run, press Ctrl + C.\n","Collecting llama-cloud-services>=0.6.73 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.73-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.72-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.72 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.72-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.71-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.71 (from 
llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.71-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.70-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.70 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.70-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.69-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.69 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.69-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.68-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.68 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.68-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from 
llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.67-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.67 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.67-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.66-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.66 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.66-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.65-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.64 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.65-py3-none-any.whl.metadata (3.3 kB)\n"," Downloading llama_cloud_services-0.6.64-py3-none-any.whl.metadata (3.3 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.64-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading 
llama_parse-0.6.63-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.63 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.63-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.62-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.62 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.62-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.60-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.60 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.60-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.59-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.59 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.59-py3-none-any.whl.metadata 
(3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.58-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.58 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.58-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.57-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.56 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.57-py3-none-any.whl.metadata (3.7 kB)\n"," Downloading llama_cloud_services-0.6.56-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_parse-0.6.56-py3-none-any.whl.metadata (6.6 kB)\n"," Downloading llama_parse-0.6.55-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.55 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.55-py3-none-any.whl.metadata (3.7 kB)\n","Collecting llama-parse>=0.5.0 (from llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 
12))\n"," Downloading llama_parse-0.6.54-py3-none-any.whl.metadata (6.6 kB)\n","Collecting llama-cloud-services>=0.6.54 (from llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading llama_cloud_services-0.6.54-py3-none-any.whl.metadata (3.6 kB)\n","Requirement already satisfied: python-dotenv<2,>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from llama-cloud-services>=0.6.54->llama-parse>=0.5.0->llama-index-readers-llama-parse>=0.4.0->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12)) (1.2.1)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 6)) (0.1.2)\n","Collecting colorama>=0.4 (from griffe->banks<3,>=2.2.0->llama-index-core<0.15.0,>=0.14.7->llama-index->-r /content/drive/MyDrive/ai-accelerator-C2-main/Day_6/session_2/requirements.txt (line 12))\n"," Downloading colorama-0.4.6-py2.py3-none-any.whl.metadata (17 kB)\n","Downloading lancedb-0.25.2-cp39-abi3-manylinux_2_28_x86_64.whl (38.7 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m38.7/38.7 MB\u001b[0m \u001b[31m25.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index-0.14.7-py3-none-any.whl (7.4 kB)\n","Downloading llama_index_vector_stores_lancedb-0.4.1-py3-none-any.whl (7.9 kB)\n","Downloading llama_index_embeddings_huggingface-0.6.1-py3-none-any.whl (8.9 kB)\n","Downloading llama_index_llms_huggingface_api-0.6.1-py3-none-any.whl (7.5 kB)\n","Downloading llama_index_embeddings_openai-0.5.1-py3-none-any.whl (7.0 kB)\n","Downloading llama_index_llms_openrouter-0.4.2-py3-none-any.whl 
(4.5 kB)\n","Downloading yt_dlp-2025.10.22-py3-none-any.whl (3.2 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m3.2/3.2 MB\u001b[0m \u001b[31m83.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m61.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading lance_namespace-0.0.20-py3-none-any.whl (31 kB)\n","Downloading llama_index_cli-0.5.3-py3-none-any.whl (28 kB)\n","Downloading llama_index_core-0.14.7-py3-none-any.whl (11.9 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.9/11.9 MB\u001b[0m \u001b[31m80.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_indices_managed_llama_cloud-0.9.4-py3-none-any.whl (17 kB)\n","Downloading Deprecated-1.2.18-py2.py3-none-any.whl (10.0 kB)\n","Downloading llama_cloud-0.1.35-py3-none-any.whl (303 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m303.3/303.3 kB\u001b[0m \u001b[31m16.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_llms_openai-0.6.6-py3-none-any.whl (26 kB)\n","Downloading llama_index_llms_openai_like-0.5.3-py3-none-any.whl (4.7 kB)\n","Downloading llama_index_readers_file-0.5.4-py3-none-any.whl (51 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.8/51.8 kB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_readers_llama_parse-0.5.1-py3-none-any.whl (3.2 kB)\n","Downloading pylance-0.38.3-cp39-abi3-manylinux_2_28_x86_64.whl (48.0 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m48.0/48.0 MB\u001b[0m \u001b[31m21.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hUsing cached setuptools-80.9.0-py3-none-any.whl (1.2 MB)\n","Downloading deprecation-2.1.0-py2.py3-none-any.whl (11 kB)\n","Downloading tantivy-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.1 MB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m4.1/4.1 MB\u001b[0m \u001b[31m87.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading banks-2.2.0-py3-none-any.whl (29 kB)\n","Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n","Downloading filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n","Downloading llama_index_workflows-2.10.2-py3-none-any.whl (90 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m90.7/90.7 kB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_parse-0.6.54-py3-none-any.whl (4.9 kB)\n","Downloading llama_cloud_services-0.6.54-py3-none-any.whl (63 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m63.9/63.9 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading pypdf-6.1.3-py3-none-any.whl (323 kB)\n","\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m323.9/323.9 kB\u001b[0m \u001b[31m26.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n","Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n","Downloading wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (88 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m88.0/88.0 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading aiosqlite-0.21.0-py3-none-any.whl (15 kB)\n","Downloading dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n","Downloading lance_namespace_urllib3_client-0.0.20-py3-none-any.whl (229 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m229.6/229.6 kB\u001b[0m \u001b[31m17.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading llama_index_instrumentation-0.4.2-py3-none-any.whl (15 kB)\n","Downloading marshmallow-3.26.1-py3-none-any.whl (50 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.9/50.9 kB\u001b[0m \u001b[31m3.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading mypy_extensions-1.1.0-py3-none-any.whl (5.0 kB)\n","Downloading griffe-1.14.0-py3-none-any.whl (144 kB)\n","\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m144.4/144.4 kB\u001b[0m \u001b[31m13.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hDownloading 
colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n","Building wheels for collected packages: openai-whisper\n"," Building wheel for openai-whisper (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for openai-whisper: filename=openai_whisper-20250625-py3-none-any.whl size=803979 sha256=df6b8a2865a9c656c885fb982e01566928267089cd15a8f088b91321fb35968a\n"," Stored in directory: /root/.cache/pip/wheels/61/d2/20/09ec9bef734d126cba375b15898010b6cc28578d8afdde5869\n","Successfully built openai-whisper\n","Installing collected packages: striprtf, filetype, dirtyjson, yt-dlp, wrapt, tantivy, setuptools, pypdf, pylance, mypy-extensions, marshmallow, jedi, deprecation, colorama, aiosqlite, typing-inspect, griffe, deprecated, llama-index-instrumentation, llama-cloud, lance-namespace-urllib3-client, dataclasses-json, banks, openai-whisper, llama-index-workflows, lance-namespace, llama-index-core, lancedb, llama-index-vector-stores-lancedb, llama-index-readers-file, llama-index-llms-openai, llama-index-llms-huggingface-api, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-embeddings-huggingface, llama-cloud-services, llama-parse, llama-index-llms-openai-like, llama-index-cli, llama-index-readers-llama-parse, llama-index-llms-openrouter, llama-index\n"," Attempting uninstall: wrapt\n"," Found existing installation: wrapt 2.0.0\n"," Uninstalling wrapt-2.0.0:\n"," Successfully uninstalled wrapt-2.0.0\n"," Attempting uninstall: setuptools\n"," Found existing installation: setuptools 75.2.0\n"," Uninstalling setuptools-75.2.0:\n"," Successfully uninstalled setuptools-75.2.0\n","Successfully installed aiosqlite-0.21.0 banks-2.2.0 colorama-0.4.6 dataclasses-json-0.6.7 deprecated-1.2.18 deprecation-2.1.0 dirtyjson-1.0.8 filetype-1.2.0 griffe-1.14.0 jedi-0.19.2 lance-namespace-0.0.20 lance-namespace-urllib3-client-0.0.20 lancedb-0.25.2 llama-cloud-0.1.35 llama-cloud-services-0.6.54 llama-index-0.14.7 llama-index-cli-0.5.3 
llama-index-core-0.14.7 llama-index-embeddings-huggingface-0.6.1 llama-index-embeddings-openai-0.5.1 llama-index-indices-managed-llama-cloud-0.9.4 llama-index-instrumentation-0.4.2 llama-index-llms-huggingface-api-0.6.1 llama-index-llms-openai-0.6.6 llama-index-llms-openai-like-0.5.3 llama-index-llms-openrouter-0.4.2 llama-index-readers-file-0.5.4 llama-index-readers-llama-parse-0.5.1 llama-index-vector-stores-lancedb-0.4.1 llama-index-workflows-2.10.2 llama-parse-0.6.54 marshmallow-3.26.1 mypy-extensions-1.1.0 openai-whisper-20250625 pylance-0.38.3 pypdf-6.1.3 setuptools-80.9.0 striprtf-0.0.26 tantivy-0.25.0 typing-inspect-0.9.0 wrapt-1.17.3 yt-dlp-2025.10.22\n"]},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["_distutils_hack"]},"id":"c44b2f91af8043c3ae103c955ddb8c15"}},"metadata":{}}]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"tS_6DiHHZFvm","executionInfo":{"status":"ok","timestamp":1762101233293,"user_tz":-330,"elapsed":51,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"632faa21-77bc-42a9-ccfe-0a0377784a00"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… All libraries imported successfully!\n"]}],"source":["# Import all required libraries\n","import gradio as gr\n","import os\n","from pathlib import Path\n","from typing import Dict, List, Optional, Any\n","\n","# LlamaIndex core components\n","from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, Settings\n","from llama_index.vector_stores.lancedb import LanceDBVectorStore\n","from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n","from llama_index.llms.openrouter import OpenRouter\n","\n","# Advanced RAG components\n","from llama_index.core.postprocessor import SimilarityPostprocessor\n","from llama_index.core.response_synthesizers import TreeSummarize, Refine, CompactAndRefine\n","from 
llama_index.core.retrievers import VectorIndexRetriever\n","\n","print(\"โœ… All libraries imported successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"r87HNpLgZFvp"},"source":["## ๐Ÿค– Part 2: Advanced RAG Backend Class\n","\n","Create an advanced RAG backend that supports dynamic configuration of all parameters.\n"]},{"cell_type":"code","execution_count":3,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":386,"referenced_widgets":["e555075b3e35411da8a8fa2a151d071c","cacc05334346456fba8750702fa72804","1e0b3679aacd4fee8f374db0a9c80c6e","6895b036178344a8b4288bc9622beee9","76146673913a4dfc8058c1ba15b5dc0a","afe9e823311d4f8f98fe74c09b4bc65c","52328b934aa64d0ea8f48d5ee08c4d6f","2581605a93414b8fad1c6917c9e62041","bbf6c319eb91480fa1dd9a735b62117d","5c27b0f135394065919c1aa99328bb74","e7f0690ca1c94934976ec486dc37072f","5da85a1d4aec45f4a7d438187785f2e7","df63701c3c9540278f9e1f9ec72a05cf","007f597dd2e74066b2749182d3e77cbd","13231d4eb7b24594997fdad377e9a39f","0749219f0b4f4d5685b5d1fa2744e5c0","0b6f2f52082f44ecb213ad01fda25649","554fe1d4de3b4140a1bc8bc73f32aad5","9c0700eb9c1345219ead5383c7a5c7c4","0b2a765d805342d0a2b05c6da90d8db4","e955920cb26a440aad247bd03928a99d","85e9c9e790b04f3fb0babf5d36697b57","826e3e3fdd7c44f8ae069ac8cb179b57","88d753427218445e8970c92618c9e8ac","b9df30b27c4d47dca2b18340de6f3b56","3be7ef3867e54a7d98fc7b97de945d35","b728eeea9bbb4d6f8420486e76a27ed8","2d4f4bda9d1741b7a1fd6949a3570e3c","5d7ed8c9b4bc4129baec1ce03527bd10","4c3ad25ebbf54603b5d3e9313c742ac5","2ce5e030c5824644bd20b0f997faf3ff","2bdbf39069564c43a84eb87813cae3ab","df80b039fd1f4771a0e71bfafc219033","b5451d4ea98245509c782b8fe1d25cfc","efb15bc3d2524078ae47ed2b17099e2c","ce3a6a42427b47ddb8dec0b86b803e9d","e4e9564b34ff49eba064a27481b5d1a7","f1d99a3490034b77a72732c83785064d","49f0e8f020a2492ba68e874f4c00fe67","04a5bde86d3b4059a1e1f8a55199c7d8","366da23ff9c2454daddcb04eb9f416c0","b6b286c77de44f869f22e1fc3773005c","b58a7568a3f94b56876e77ed3576fe76","6d82cf3cdf9e4779975a7849593c
5e3a","c218eae8906949508dc7fdd629c9e6c3","b6b8c9d1dba7435582d156aee18505a4","9bd3f34817134982bdee81d46a839ae5","5bbc689e0eb3486595f41c1aed1351ea","03a5f56c249448cb8a9c00963576195c","1a5c47d3aef547308d8cf4178b3d277d","c4ba4f2a1daf4233a94e64462601d006","cc94e340844d461caac8491af4171a0f","6b179591713b436c9627d102187dd0e2","ec3fd1520af74520a7a6a5a00fc78035","f4f62182f094460baf830448f3d016a5","bd76ef35f6034a0faa3eafdd92268b65","1b909e7746564a06af03d038b1f9a028","7bfbc726c13343c9af40bc4843cf6e88","a014631e0033471a8281112845e82e77","1bbae592d24d4af9b635c6c01a783b71","7f6d51e4097347219ee26cfc8bfc7487","8bf2267284124019b5db59f5b74ec0fb","fc3b98227c2340519a308e83456d0548","3be865e0fbe1479d950f208ba22c697c","59ba8c325ccb45f9b61b973f19926d33","7aad31231ba54d6183b865df565d2f15","022a6a5527534d6492f1853944362cab","dd05f368aaae4881beda9caf48beb67d","5c8b5945e01743719b75550e8ae2234d","cb37c858fa744c64ab659ff78e3a9fb7","5e108b1d57e24a48938cd4fdc4e2f90d","7575469fb0a34897bffa23cf6e1959ee","ddce348992f1480cacede7855bd69e0b","ccf49349f1b54a45986d54df8cdf06a6","a4e3d8eaae084d41a49aece4ce89cb31","ad7b2c8da94a4125adcae2d18ff632b6","5f02b68046014a6b8482a3814c5ee44a","bc86fc01faf04f0b8e8982dde6aebfee","22ed11bcf43c41f2a6813eae2c5fbfb5","2f6e11fed22842b1a57153781b773758","fcb7052b94fc47f1b7734de1568e17f0","42ad5a129ec844eebe6009fbaeb8c475","61cda27f064b484aa9446e74cef059d2","fc94e5ee2e5e40a7abfc8595fa1b8bba","9ced3f5dc0c341e39f156a51a6de3162","4283b4daac544179ba8d2ac59a6a890a","10f5e0c42d864e2282a393cf169df9b8","2df9ec0f953445d397128e084c33c738","01e6ba1052dc4b939e2116563f3b2df5","9bb7052ed725400ab5a0a050a3f62e51","5bec150283b340ee8c30834c5a1fbaa5","ed69cd4fb29744719b6291e34cdcc8ef","cf994cff745c40b986c387f5bdf1a429","dee358c514304f40a092cfddee6cd4ff","0696f75acf1f4014a565de28ae7c8462","d7347f2c6fa2425588106b7e2a9699f0","b2d78955677045ef8b43f934aaabfd97","61356446b8174f93b6c045e7c0741b0b","005afce015354a6fa0dacf9a01bba361","a999fe81cf984effb2f621da0b338823","27e0a306b76d4ec4a50290ccfdd7755a"
,"539f802d01ef45ecb3c750616d36f3e8","057558df75e947a8af2e5a4538d3c26c","ae265553fe7d4b33b131c6d46ce02f44","00dd28179d6a41d8812aafda5135507c","5cb1be0cd24141f1a32e86b9ea09ec86","282fa42f92e84c4c9a2a886d5d85df4a","54382bef09384ff29564c3d7b6c5ea89","e6d8e6a9d954452bb8adb8d3d101939d","5df80152d588453ea46ae6c73eac0a19","105d74d437fd4d19a2beab025055b969","58679b3e995b413298ccceba14c5cee5","702bc5c5d42a48faa14ee2a661471b2c","827b227da03a4f39b4eebf2ebca7f147","3b03d89f230747c799418596fdd038cb","c6e4f05eceea4daa9f9ed046faa160f6","42e89191795b4c82b3b3970f638950ae","3108cef227b24e0499f26f823e89d1f3","f883d8d9f738476887c97c94dcf184df","999f8bcc0b1d4083922105f549167f1b","8a1597c0aaa84bb29042275aae7de6f2"]},"id":"J9fb_kzHZFvp","executionInfo":{"status":"ok","timestamp":1762101939002,"user_tz":-330,"elapsed":8584,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"d5f1fbab-755d-4857-fbe8-70361f0a1a14"},"outputs":[{"output_type":"display_data","data":{"text/plain":["modules.json: 0%| | 0.00/349 [00:00 Dict[str, Any]:\n"," \"\"\"Query the RAG system with advanced configuration.\"\"\"\n","\n"," # Check if index exists\n"," if self.index is None:\n"," return {\"response\": \"โŒ Please initialize the database first!\", \"sources\": [], \"config\": {}}\n","\n"," # Check if question is empty\n"," if not question or not question.strip():\n"," return {\"response\": \"โš ๏ธ Please enter a question first!\", \"sources\": [], \"config\": {}}\n","\n"," try:\n"," # Update settings with new parameters\n"," self.update_settings(model, temperature, chunk_size, chunk_overlap)\n","\n"," # Get postprocessors\n"," postprocessors = []\n"," for name in postprocessor_names:\n"," processor = self.get_postprocessor(name, similarity_cutoff)\n"," if processor is not None:\n"," postprocessors.append(processor)\n","\n"," # Get synthesizer\n"," synthesizer = self.get_synthesizer(synthesizer_name)\n","\n"," # Create query engine with all parameters\n"," query_engine_kwargs = 
{\"similarity_top_k\": similarity_top_k}\n"," if postprocessors:\n"," query_engine_kwargs[\"node_postprocessors\"] = postprocessors\n"," if synthesizer is not None:\n"," query_engine_kwargs[\"response_synthesizer\"] = synthesizer\n","\n"," query_engine = self.index.as_query_engine(**query_engine_kwargs)\n","\n"," # Query and get response\n"," response = query_engine.query(question)\n","\n"," # Extract source information if available\n"," sources = []\n"," if hasattr(response, 'source_nodes'):\n"," for node in response.source_nodes:\n"," sources.append({\n"," \"text\": node.text[:200] + \"...\",\n"," \"score\": getattr(node, 'score', 0.0),\n"," \"source\": getattr(node.node, 'metadata', {}).get('file_name', 'Unknown')\n"," })\n","\n"," return {\n"," \"response\": str(response),\n"," \"sources\": sources,\n"," \"config\": {\n"," \"model\": model,\n"," \"temperature\": temperature,\n"," \"chunk_size\": chunk_size,\n"," \"chunk_overlap\": chunk_overlap,\n"," \"similarity_top_k\": similarity_top_k,\n"," \"postprocessors\": postprocessor_names,\n"," \"similarity_cutoff\": similarity_cutoff,\n"," \"synthesizer\": synthesizer_name\n"," }\n"," }\n","\n"," except Exception as e:\n"," return {\"response\": f\"โŒ Error processing query: {str(e)}\", \"sources\": [], \"config\": {}}\n","\n","# Initialize the backend\n","rag_backend = AdvancedRAGBackend()\n","print(\"๐Ÿš€ Advanced RAG Backend initialized and ready!\")\n"]},{"cell_type":"markdown","metadata":{"id":"BBh6sV0PZFvs"},"source":["## ๐ŸŽจ Part 3: Advanced Gradio Interface\n","\n","Create a sophisticated Gradio interface with all the configuration options specified:\n","1. Database initialization button\n","2. Search query input and button \n","3. Model selection dropdown\n","4. Temperature slider\n","5. Chunk size and overlap inputs\n","6. Similarity top-k slider\n","7. Node postprocessor multiselect\n","8. Similarity cutoff slider\n","9. 
Response synthesizer multiselect\n"]},{"cell_type":"code","execution_count":4,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"KFi62HgfZFvs","executionInfo":{"status":"ok","timestamp":1762102261840,"user_tz":-330,"elapsed":282,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"ef9f12ed-eb6e-4caf-d2e0-638d338cfc9e"},"outputs":[{"output_type":"stream","name":"stdout","text":["โœ… Advanced RAG interface created successfully!\n"]}],"source":["def create_advanced_rag_interface():\n"," \"\"\"Create advanced RAG interface with full configuration options.\"\"\"\n","\n"," def initialize_db():\n"," \"\"\"Handle database initialization.\"\"\"\n"," return rag_backend.initialize_database()\n","\n"," def handle_advanced_query(question, model, temperature, chunk_size, chunk_overlap,\n"," similarity_top_k, postprocessors, similarity_cutoff, synthesizer):\n"," \"\"\"Handle advanced RAG queries with all configuration options.\"\"\"\n"," result = rag_backend.advanced_query(\n"," question, model, temperature, chunk_size, chunk_overlap,\n"," similarity_top_k, postprocessors, similarity_cutoff, synthesizer\n"," )\n","\n"," # Format configuration for display\n"," config_text = f\"\"\"**Current Configuration:**\n","- Model: {result['config'].get('model', 'N/A')}\n","- Temperature: {result['config'].get('temperature', 'N/A')}\n","- Chunk Size: {result['config'].get('chunk_size', 'N/A')}\n","- Chunk Overlap: {result['config'].get('chunk_overlap', 'N/A')}\n","- Similarity Top-K: {result['config'].get('similarity_top_k', 'N/A')}\n","- Postprocessors: {', '.join(result['config'].get('postprocessors', []))}\n","- Similarity Cutoff: {result['config'].get('similarity_cutoff', 'N/A')}\n","- Synthesizer: {result['config'].get('synthesizer', 'N/A')}\"\"\"\n","\n"," return result[\"response\"], config_text\n","\n"," # TODO: Create the advanced interface structure\n"," # Hint: This interface needs more complex layout with configuration 
controls\n","\n"," with gr.Blocks(title=\"Advanced RAG Assistant\") as interface:\n"," # TODO: Add title and description\n"," # Hint: Use gr.Markdown() for formatted text\n","\n"," # Your title and description here:\n"," gr.Markdown(\"# ๐Ÿค– Advanced RAG Assistant\")\n"," gr.Markdown(\"Configure all RAG parameters for optimal performance and experiment with different settings!\")\n","\n","\n"," # TODO: Add database initialization section\n"," # Hint: Use gr.Button() for initialization and gr.Textbox() for status\n"," init_btn = gr.Button(\"๐Ÿ”„ Initialize Vector Database\", variant=\"primary\")\n"," status_output = gr.Textbox(label=\"Database Status\", lines=2, interactive=False)\n","\n"," # status_output = ?\n","\n","\n"," # TODO: Create main layout with columns\n"," # Hint: Configuration controls on left, query/response on right makes sense\n"," # Use gr.Row() and gr.Column() to organize this\n","\n"," with gr.Row():\n"," with gr.Column(scale=1):\n","\n"," gr.Markdown(\"### โš™๏ธ RAG Configuration\")\n","\n"," # TODO: Model selection\n"," # Hint: Use gr.Dropdown() with choices=[\"gpt-4o\", \"gpt-4o-mini\"]\n"," model_dropdown = gr.Dropdown(\n"," choices=[\"gpt-4o\", \"gpt-4o-mini\"],\n"," value=\"gpt-4o-mini\",\n"," label=\"LLM Model\"\n"," )\n","\n","\n","\n"," # TODO: Temperature control\n"," # Hint: Use gr.Slider() with minimum=0.0, maximum=1.0, step=0.1, value=0.1\n"," temperature_slider = gr.Slider(\n"," minimum=0.0, maximum=1.0, step=0.1, value=0.1,\n"," label=\"Temperature (0=deterministic, 1=creative)\"\n"," )\n","\n","\n","\n"," # TODO: Chunking parameters\n"," # Hint: Use gr.Number() for numeric inputs with default values\n"," chunk_size_input = gr.Number(\n"," value=512, minimum=128, maximum=2048,\n"," label=\"Chunk Size\"\n"," )\n","\n"," chunk_overlap_input = gr.Number(\n"," value=50, minimum=0, maximum=200,\n"," label=\"Chunk Overlap\"\n"," )\n","\n","\n"," # TODO: Retrieval parameters\n"," # Hint: Use gr.Slider() with minimum=1, maximum=20, 
step=1, value=5\n"," similarity_topk_slider = gr.Slider(\n"," minimum=1, maximum=20, step=1, value=5,\n"," label=\"Similarity Top-K (documents to retrieve)\"\n"," )\n","\n","\n"," # TODO: Postprocessor selection\n"," # Hint: Use gr.CheckboxGroup() with choices=[\"SimilarityPostprocessor\"]\n"," postprocessor_checkbox = gr.CheckboxGroup(\n"," choices=[\"SimilarityPostprocessor\"],\n"," value=[\"SimilarityPostprocessor\"],\n"," label=\"Node Postprocessors\"\n"," )\n","\n","\n"," # TODO: Similarity filtering\n"," # Hint: Use gr.Slider() with minimum=0.0, maximum=1.0, step=0.1, value=0.3\n"," similarity_cutoff_slider = gr.Slider(\n"," minimum=0.0, maximum=1.0, step=0.1, value=0.3,\n"," label=\"Similarity Cutoff (0=permissive, 1=strict)\"\n"," )\n","\n","\n"," # TODO: Response synthesizer\n"," # Hint: Use gr.Dropdown() with choices=[\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"]\n"," synthesizer_dropdown = gr.Dropdown(\n"," choices=[\"TreeSummarize\", \"Refine\", \"CompactAndRefine\", \"Default\"],\n"," value=\"TreeSummarize\",\n"," label=\"Response Synthesizer\"\n"," )\n","\n","\n","\n"," with gr.Column(scale=2):\n"," gr.Markdown(\"### ๐Ÿ’ฌ Query Interface\")\n","\n"," # TODO: Query input\n"," # Hint: Use gr.Textbox() with label=\"Ask a question\", placeholder text, lines=3\n"," query_input = gr.Textbox(\n"," label=\"Ask a question\",\n"," placeholder=\"Enter your question about the documents...\",\n"," lines=3\n"," )\n","\n","\n"," # TODO: Submit button\n"," # Hint: Use gr.Button() with variant=\"primary\"\n"," submit_btn = gr.Button(\"๐Ÿš€ Ask Question\", variant=\"primary\")\n","\n","\n"," # TODO: Response output\n"," # Hint: Use gr.Textbox() with lines=12, interactive=False\n"," response_output = gr.Textbox(\n"," label=\"AI Response\",\n"," lines=12,\n"," interactive=False\n"," )\n","\n","\n"," # TODO: Configuration display\n"," # Hint: Use gr.Textbox() with lines=8, interactive=False\n"," config_display = gr.Textbox(\n"," label=\"Configuration 
Used\",\n"," lines=8,\n"," interactive=False\n"," )\n","\n","\n","\n"," # Uncomment to Connect functions to components\n"," init_btn.click(initialize_db, outputs=[status_output])\n","\n"," submit_btn.click(\n"," handle_advanced_query,\n"," inputs=[\n"," query_input, model_dropdown, temperature_slider,\n"," chunk_size_input, chunk_overlap_input, similarity_topk_slider,\n"," postprocessor_checkbox, similarity_cutoff_slider, synthesizer_dropdown\n"," ],\n"," outputs=[response_output, config_display]\n"," )\n","\n","\n"," return interface\n","\n","# Create the interface\n","advanced_interface = create_advanced_rag_interface()\n","print(\"โœ… Advanced RAG interface created successfully!\")\n"]},{"cell_type":"markdown","metadata":{"id":"W7n-oefpZFvu"},"source":["## ๐Ÿš€ Part 4: Launch Your Advanced Application\n","\n","Launch your advanced Gradio application and test all the configuration options!\n"]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"bLK1trbKZFvu","executionInfo":{"status":"ok","timestamp":1762102278662,"user_tz":-330,"elapsed":972,"user":{"displayName":"Monalisa Samal","userId":"00386399154790621171"}},"outputId":"688af962-c7f6-4a1c-ccdf-1b1712c82c9c"},"outputs":[{"output_type":"stream","name":"stdout","text":["๐ŸŽ‰ Launching your Advanced RAG Assistant...\n","๐Ÿ”— Your application will open in a new browser tab!\n","\n","โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\n","\n","๐Ÿ“‹ Testing Instructions:\n","1. Click 'Initialize Vector Database' button first\n","2. Wait for success message\n","3. Configure your RAG parameters:\n"," - Choose model (gpt-4o, gpt-4o-mini)\n"," - Adjust temperature (0.0 = deterministic, 1.0 = creative)\n"," - Set chunk size and overlap\n"," - Choose similarity top-k\n"," - Select postprocessors and synthesizer\n","4. Enter a question and click 'Ask Question'\n","5. 
Review both the response and configuration used\n","\n","๐Ÿงช Experiments to try:\n","- Compare different models with the same question\n","- Test temperature effects (0.1 vs 0.9)\n","- Try different chunk sizes (256 vs 1024)\n","- Compare synthesizers (TreeSummarize vs Refine)\n","- Adjust similarity cutoff to filter results\n","It looks like you are running Gradio on a hosted Jupyter notebook, which requires `share=True`. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n","\n","Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n","* Running on public URL: https://53611b3c5e55b9f39f.gradio.live\n","\n","This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"]},{"output_type":"display_data","data":{"text/plain":[""],"text/html":["
"]},"metadata":{}},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":5}],"source":["print(\"๐ŸŽ‰ Launching your Advanced RAG Assistant...\")\n","print(\"๐Ÿ”— Your application will open in a new browser tab!\")\n","print(\"\")\n","print(\"โš ๏ธ Make sure your OPENROUTER_API_KEY environment variable is set!\")\n","print(\"\")\n","print(\"๐Ÿ“‹ Testing Instructions:\")\n","print(\"1. Click 'Initialize Vector Database' button first\")\n","print(\"2. Wait for success message\")\n","print(\"3. Configure your RAG parameters:\")\n","print(\" - Choose model (gpt-4o, gpt-4o-mini)\")\n","print(\" - Adjust temperature (0.0 = deterministic, 1.0 = creative)\")\n","print(\" - Set chunk size and overlap\")\n","print(\" - Choose similarity top-k\")\n","print(\" - Select postprocessors and synthesizer\")\n","print(\"4. Enter a question and click 'Ask Question'\")\n","print(\"5. Review both the response and configuration used\")\n","print(\"\")\n","print(\"๐Ÿงช Experiments to try:\")\n","print(\"- Compare different models with the same question\")\n","print(\"- Test temperature effects (0.1 vs 0.9)\")\n","print(\"- Try different chunk sizes (256 vs 1024)\")\n","print(\"- Compare synthesizers (TreeSummarize vs Refine)\")\n","print(\"- Adjust similarity cutoff to filter results\")\n","\n","# Your code here:\n","advanced_interface.launch()"]},{"cell_type":"markdown","metadata":{"id":"jFejTb9EZFvv"},"source":["## ๐Ÿ’ก Understanding the Configuration Options\n","\n","### Model Selection\n","- **gpt-4o**: Latest and most capable model, best quality responses\n","- **gpt-4o-mini**: Faster and cheaper while maintaining good quality\n","\n","### Temperature (0.0 - 1.0)\n","- **0.0-0.3**: Deterministic, factual responses\n","- **0.4-0.7**: Balanced creativity and accuracy\n","- **0.8-1.0**: More creative and varied responses\n","\n","### Chunk Size & Overlap\n","- **Chunk Size**: How much text to process at once (256-1024 typical)\n","- **Chunk Overlap**: 
Overlap between chunks to maintain context (10-100 typical)\n","\n","### Similarity Top-K (1-20)\n","- **Lower values (3-5)**: More focused, faster responses\n","- **Higher values (8-15)**: More comprehensive, detailed responses\n","\n","### Node Postprocessors\n","- **SimilarityPostprocessor**: Filters out low-relevance documents\n","\n","### Similarity Cutoff (0.0-1.0)\n","- **0.1-0.3**: More permissive, includes potentially relevant docs\n","- **0.5-0.8**: More strict, only highly relevant docs\n","\n","### Response Synthesizers\n","- **TreeSummarize**: Hierarchical summarization, good for complex topics\n","- **Refine**: Iterative refinement, builds detailed responses\n","- **CompactAndRefine**: Efficient version of Refine\n","- **Default**: Standard synthesis approach\n"]},{"cell_type":"markdown","metadata":{"id":"8oaw_QtBZFvv"},"source":["## โœ… Assignment Completion Checklist\n","\n","Before submitting, ensure you have:\n","\n","- [ ] Set up your OPENROUTER_API_KEY environment variable\n","- [ ] Imported all necessary libraries including advanced RAG components\n","- [ ] Created AdvancedRAGBackend class with configurable parameters\n","- [ ] Implemented all required methods:\n"," - [ ] `update_settings()` - Updates LLM and chunking parameters\n"," - [ ] `initialize_database()` - Sets up vector database\n"," - [ ] `get_postprocessor()` - Returns selected postprocessor\n"," - [ ] `get_synthesizer()` - Returns selected synthesizer\n"," - [ ] `advanced_query()` - Handles queries with all configuration options\n","- [ ] Created advanced Gradio interface with all required components:\n"," - [ ] Initialize database button\n"," - [ ] Model selection dropdown (gpt-4o, gpt-4o-mini)\n"," - [ ] Temperature slider (0 to 1, step 0.1)\n"," - [ ] Chunk size input (default 512)\n"," - [ ] Chunk overlap input (default 50)\n"," - [ ] Similarity top-k slider (1 to 20, default 5)\n"," - [ ] Node postprocessor multiselect\n"," - [ ] Similarity cutoff slider (0.0 to 1.0, step 0.1, 
default 0.3)\n"," - [ ] Response synthesizer dropdown\n"," - [ ] Query input and submit button\n"," - [ ] Response output\n"," - [ ] Configuration display\n","- [ ] Connected all components to backend functions\n","- [ ] Successfully launched the application\n","- [ ] Tested different parameter combinations\n","- [ ] Verified all configuration options work correctly\n","\n","## ๐ŸŽŠ Congratulations!\n","\n","You've successfully built a professional, production-ready RAG application! You now have:\n","\n","- **Advanced Parameter Control**: Full control over all RAG system parameters\n","- **Professional UI**: Clean, organized interface with proper layout\n","- **Real-time Configuration**: Ability to experiment with different settings\n","- **Production Patterns**: Understanding of how to build scalable AI applications\n","\n","## ๐Ÿš€ Next Steps & Extensions\n","\n","**Potential Enhancements:**\n","1. **Authentication**: Add user login and session management\n","2. **Document Upload**: Allow users to upload their own documents\n","3. **Chat History**: Implement conversation memory\n","4. **Performance Monitoring**: Add response time and quality metrics\n","5. **A/B Testing**: Compare different configurations side-by-side\n","6. **Export Features**: Download responses and configurations\n","7. 
**Advanced Visualizations**: Show document similarity scores and retrieval paths\n","\n","**Deployment Options:**\n","- **Local**: Run on your machine for development\n","- **Gradio Cloud**: Deploy with `interface.launch(share=True)`\n","- **Hugging Face Spaces**: Deploy to Hugging Face for public access\n","- **Docker**: Containerize for scalable deployment\n","- **Cloud Platforms**: Deploy to AWS, GCP, or Azure\n","\n","You're now ready to build sophisticated AI-powered applications!\n"]}],"metadata":{"kernelspec":{"display_name":"accelerator","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"colab":{"provenance":[]},"widgets":{"application/vnd.jupyter.widget-state+json":{"e555075b3e35411da8a8fa2a151d071c":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_cacc05334346456fba8750702fa72804","IPY_MODEL_1e0b3679aacd4fee8f374db0a9c80c6e","IPY_MODEL_6895b036178344a8b4288bc9622beee9"],"layout":"IPY_MODEL_76146673913a4dfc8058c1ba15b5dc0a"}},"cacc05334346456fba8750702fa72804":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_afe9e823311d4f8f98fe74c09b4bc65c","placeholder":"โ€‹","style":"IPY_MODEL
_52328b934aa64d0ea8f48d5ee08c4d6f","value":"modules.json:โ€‡100%"}},"1e0b3679aacd4fee8f374db0a9c80c6e":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_2581605a93414b8fad1c6917c9e62041","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_bbf6c319eb91480fa1dd9a735b62117d","value":349}},"6895b036178344a8b4288bc9622beee9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5c27b0f135394065919c1aa99328bb74","placeholder":"โ€‹","style":"IPY_MODEL_e7f0690ca1c94934976ec486dc37072f","value":"โ€‡349/349โ€‡[00:00<00:00,โ€‡28.3kB/s]"}},"76146673913a4dfc8058c1ba15b5dc0a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template
_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"afe9e823311d4f8f98fe74c09b4bc65c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"52328b934aa64d0ea8f48d5ee08c4d6f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2581605a93414b8fad1c6917c9e62041":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.
2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bbf6c319eb91480fa1dd9a735b62117d":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5c27b0f135394065919c1aa99328bb74":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify
_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e7f0690ca1c94934976ec486dc37072f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5da85a1d4aec45f4a7d438187785f2e7":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_df63701c3c9540278f9e1f9ec72a05cf","IPY_MODEL_007f597dd2e74066b2749182d3e77cbd","IPY_MODEL_13231d4eb7b24594997fdad377e9a39f"],"layout":"IPY_MODEL_0749219f0b4f4d5685b5d1fa2744e5c0"}},"df63701c3c9540278f9e1f9ec72a05cf":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0b6f2f52082f44ecb213ad01fda25649","placeholder":"โ€‹","style":"IPY_MODEL_554fe1d4de3b4140a1bc8bc73f32aad5","value":"config_sentence_transformers.json:โ€‡100%"}},"007f597dd2e74066b2749182d3e77cbd":{"model_module":"@jupyter-widgets/controls","model_name":"Float
ProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_9c0700eb9c1345219ead5383c7a5c7c4","max":124,"min":0,"orientation":"horizontal","style":"IPY_MODEL_0b2a765d805342d0a2b05c6da90d8db4","value":124}},"13231d4eb7b24594997fdad377e9a39f":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e955920cb26a440aad247bd03928a99d","placeholder":"โ€‹","style":"IPY_MODEL_85e9c9e790b04f3fb0babf5d36697b57","value":"โ€‡124/124โ€‡[00:00<00:00,โ€‡11.7kB/s]"}},"0749219f0b4f4d5685b5d1fa2744e5c0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,
"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0b6f2f52082f44ecb213ad01fda25649":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"554fe1d4de3b4140a1bc8bc73f32aad5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"9c0700eb9c1345219ead5383c7a5c7c4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":nu
ll,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0b2a765d805342d0a2b05c6da90d8db4":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e955920cb26a440aad247bd03928a99d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"ov
erflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"85e9c9e790b04f3fb0babf5d36697b57":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"826e3e3fdd7c44f8ae069ac8cb179b57":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_88d753427218445e8970c92618c9e8ac","IPY_MODEL_b9df30b27c4d47dca2b18340de6f3b56","IPY_MODEL_3be7ef3867e54a7d98fc7b97de945d35"],"layout":"IPY_MODEL_b728eeea9bbb4d6f8420486e76a27ed8"}},"88d753427218445e8970c92618c9e8ac":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2d4f4bda9d1741b7a1fd6949a3570e3c","placeholder":"โ€‹","style":"IPY_MODEL_5d7ed8c9b4bc4129baec1ce03527bd10","value":"README.md:โ€‡"}},"b9df30b27c4d47dca2b18340de6f3b56":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_cou
nt":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_4c3ad25ebbf54603b5d3e9313c742ac5","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2ce5e030c5824644bd20b0f997faf3ff","value":1}},"3be7ef3867e54a7d98fc7b97de945d35":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2bdbf39069564c43a84eb87813cae3ab","placeholder":"โ€‹","style":"IPY_MODEL_df80b039fd1f4771a0e71bfafc219033","value":"โ€‡94.8k/?โ€‡[00:00<00:00,โ€‡4.38MB/s]"}},"b728eeea9bbb4d6f8420486e76a27ed8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2d4f4bda9d1741b7a1f
d6949a3570e3c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5d7ed8c9b4bc4129baec1ce03527bd10":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4c3ad25ebbf54603b5d3e9313c742ac5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,
"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"2ce5e030c5824644bd20b0f997faf3ff":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2bdbf39069564c43a84eb87813cae3ab":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"df80b039fd1f4771a0e71bfafc219033":{"model_module":"@jupyter-widgets/contro
ls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b5451d4ea98245509c782b8fe1d25cfc":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_efb15bc3d2524078ae47ed2b17099e2c","IPY_MODEL_ce3a6a42427b47ddb8dec0b86b803e9d","IPY_MODEL_e4e9564b34ff49eba064a27481b5d1a7"],"layout":"IPY_MODEL_f1d99a3490034b77a72732c83785064d"}},"efb15bc3d2524078ae47ed2b17099e2c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_49f0e8f020a2492ba68e874f4c00fe67","placeholder":"โ€‹","style":"IPY_MODEL_04a5bde86d3b4059a1e1f8a55199c7d8","value":"sentence_bert_config.json:โ€‡100%"}},"ce3a6a42427b47ddb8dec0b86b803e9d":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null
,"layout":"IPY_MODEL_366da23ff9c2454daddcb04eb9f416c0","max":52,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b6b286c77de44f869f22e1fc3773005c","value":52}},"e4e9564b34ff49eba064a27481b5d1a7":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b58a7568a3f94b56876e77ed3576fe76","placeholder":"โ€‹","style":"IPY_MODEL_6d82cf3cdf9e4779975a7849593c5e3a","value":"โ€‡52.0/52.0โ€‡[00:00<00:00,โ€‡4.65kB/s]"}},"f1d99a3490034b77a72732c83785064d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"49f0e8f020a2492ba68e874f4c00fe67":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_mo
dule_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"04a5bde86d3b4059a1e1f8a55199c7d8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"366da23ff9c2454daddcb04eb9f416c0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"j
ustify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b6b286c77de44f869f22e1fc3773005c":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"b58a7568a3f94b56876e77ed3576fe76":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6d82cf3cdf9e4779975a7849593c5e3a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"De
scriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c218eae8906949508dc7fdd629c9e6c3":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_b6b8c9d1dba7435582d156aee18505a4","IPY_MODEL_9bd3f34817134982bdee81d46a839ae5","IPY_MODEL_5bbc689e0eb3486595f41c1aed1351ea"],"layout":"IPY_MODEL_03a5f56c249448cb8a9c00963576195c"}},"b6b8c9d1dba7435582d156aee18505a4":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1a5c47d3aef547308d8cf4178b3d277d","placeholder":"โ€‹","style":"IPY_MODEL_c4ba4f2a1daf4233a94e64462601d006","value":"config.json:โ€‡100%"}},"9bd3f34817134982bdee81d46a839ae5":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_cc94e340844d461caac8491af4171a0f","max":743,"min":0,"orientation":"horizontal","style":"IPY_MODEL_6b179591713b436c9627d102187dd0e2","value":743}},"5bbc689e0eb3486595f4
1c1aed1351ea":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ec3fd1520af74520a7a6a5a00fc78035","placeholder":"โ€‹","style":"IPY_MODEL_f4f62182f094460baf830448f3d016a5","value":"โ€‡743/743โ€‡[00:00<00:00,โ€‡67.3kB/s]"}},"03a5f56c249448cb8a9c00963576195c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1a5c47d3aef547308d8cf4178b3d277d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"a
lign_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c4ba4f2a1daf4233a94e64462601d006":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"cc94e340844d461caac8491af4171a0f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"ov
erflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6b179591713b436c9627d102187dd0e2":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ec3fd1520af74520a7a6a5a00fc78035":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f4f62182f094460baf830448f3d016a5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bd76ef35f6034a0faa3eafdd922
68b65":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_1b909e7746564a06af03d038b1f9a028","IPY_MODEL_7bfbc726c13343c9af40bc4843cf6e88","IPY_MODEL_a014631e0033471a8281112845e82e77"],"layout":"IPY_MODEL_1bbae592d24d4af9b635c6c01a783b71"}},"1b909e7746564a06af03d038b1f9a028":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_7f6d51e4097347219ee26cfc8bfc7487","placeholder":"โ€‹","style":"IPY_MODEL_8bf2267284124019b5db59f5b74ec0fb","value":"model.safetensors:โ€‡100%"}},"7bfbc726c13343c9af40bc4843cf6e88":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_fc3b98227c2340519a308e83456d0548","max":133466304,"min":0,"orientation":"horizontal","style":"IPY_MODEL_3be865e0fbe1479d950f208ba22c697c","value":133466304}},"a014631e0033471a8281112845e82e77":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-wid
gets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_59ba8c325ccb45f9b61b973f19926d33","placeholder":"โ€‹","style":"IPY_MODEL_7aad31231ba54d6183b865df565d2f15","value":"โ€‡133M/133Mโ€‡[00:01<00:00,โ€‡119MB/s]"}},"1bbae592d24d4af9b635c6c01a783b71":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7f6d51e4097347219ee26cfc8bfc7487":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":nul
l,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8bf2267284124019b5db59f5b74ec0fb":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"fc3b98227c2340519a308e83456d0548":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3be865e0fbe1479d950f208ba22c697c":{"model_module":"@jupyter-wid
gets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"59ba8c325ccb45f9b61b973f19926d33":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7aad31231ba54d6183b865df565d2f15":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"022a6a5527534d6492f1853944362cab":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/c
ontrols","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_dd05f368aaae4881beda9caf48beb67d","IPY_MODEL_5c8b5945e01743719b75550e8ae2234d","IPY_MODEL_cb37c858fa744c64ab659ff78e3a9fb7"],"layout":"IPY_MODEL_5e108b1d57e24a48938cd4fdc4e2f90d"}},"dd05f368aaae4881beda9caf48beb67d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_7575469fb0a34897bffa23cf6e1959ee","placeholder":"โ€‹","style":"IPY_MODEL_ddce348992f1480cacede7855bd69e0b","value":"tokenizer_config.json:โ€‡100%"}},"5c8b5945e01743719b75550e8ae2234d":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ccf49349f1b54a45986d54df8cdf06a6","max":366,"min":0,"orientation":"horizontal","style":"IPY_MODEL_a4e3d8eaae084d41a49aece4ce89cb31","value":366}},"cb37c858fa744c64ab659ff78e3a9fb7":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name
":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ad7b2c8da94a4125adcae2d18ff632b6","placeholder":"โ€‹","style":"IPY_MODEL_5f02b68046014a6b8482a3814c5ee44a","value":"โ€‡366/366โ€‡[00:00<00:00,โ€‡19.0kB/s]"}},"5e108b1d57e24a48938cd4fdc4e2f90d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7575469fb0a34897bffa23cf6e1959ee":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justif
y_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ddce348992f1480cacede7855bd69e0b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ccf49349f1b54a45986d54df8cdf06a6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a4e3d8eaae084d41a49aece4ce89cb31":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name"
:"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ad7b2c8da94a4125adcae2d18ff632b6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5f02b68046014a6b8482a3814c5ee44a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bc86fc01faf04f0b8e8982dde6aebfee":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBox
View","box_style":"","children":["IPY_MODEL_22ed11bcf43c41f2a6813eae2c5fbfb5","IPY_MODEL_2f6e11fed22842b1a57153781b773758","IPY_MODEL_fcb7052b94fc47f1b7734de1568e17f0"],"layout":"IPY_MODEL_42ad5a129ec844eebe6009fbaeb8c475"}},"22ed11bcf43c41f2a6813eae2c5fbfb5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_61cda27f064b484aa9446e74cef059d2","placeholder":"โ€‹","style":"IPY_MODEL_fc94e5ee2e5e40a7abfc8595fa1b8bba","value":"vocab.txt:โ€‡"}},"2f6e11fed22842b1a57153781b773758":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_9ced3f5dc0c341e39f156a51a6de3162","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_4283b4daac544179ba8d2ac59a6a890a","value":1}},"fcb7052b94fc47f1b7734de1568e17f0":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_10f5e0c42d864e2282a393cf169df9b8","placeholder":"โ€‹","style":"IPY_MODEL_2df9ec0f953445d397128e084c33c738","value":"โ€‡23
2k/?โ€‡[00:00<00:00,โ€‡3.62MB/s]"}},"42ad5a129ec844eebe6009fbaeb8c475":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"61cda27f064b484aa9446e74cef059d2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":n
ull,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fc94e5ee2e5e40a7abfc8595fa1b8bba":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"9ced3f5dc0c341e39f156a51a6de3162":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"4283b4daac544179ba8d2ac59a6a890a":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"10f5e0c42d864e2282
a393cf169df9b8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2df9ec0f953445d397128e084c33c738":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"01e6ba1052dc4b939e2116563f3b2df5":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_9bb7052ed725400ab5a0a050a3f62e51","IPY_MODEL_5bec150283b340ee8c30834c5a1fbaa5","IPY_MODEL_ed69cd4fb29744719b6291e34cdcc8ef"],"layout":"IPY_MODEL_cf994cff
745c40b986c387f5bdf1a429"}},"9bb7052ed725400ab5a0a050a3f62e51":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_dee358c514304f40a092cfddee6cd4ff","placeholder":"โ€‹","style":"IPY_MODEL_0696f75acf1f4014a565de28ae7c8462","value":"tokenizer.json:โ€‡"}},"5bec150283b340ee8c30834c5a1fbaa5":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_d7347f2c6fa2425588106b7e2a9699f0","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b2d78955677045ef8b43f934aaabfd97","value":1}},"ed69cd4fb29744719b6291e34cdcc8ef":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_61356446b8174f93b6c045e7c0741b0b","placeholder":"โ€‹","style":"IPY_MODEL_005afce015354a6fa0dacf9a01bba361","value":"โ€‡711k/?โ€‡[00:00<00:00,โ€‡29.1MB/s]"}},"cf994cff745c40b986c387f5bdf1a429":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module
":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dee358c514304f40a092cfddee6cd4ff":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0696f75acf1f4014a565de28ae7c8462":{"model_module":"@jupyter-widgets/controls","
model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"d7347f2c6fa2425588106b7e2a9699f0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"b2d78955677045ef8b43f934aaabfd97":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"61356446b8174f93b6c045e7c0741b0b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.
0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"005afce015354a6fa0dacf9a01bba361":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a999fe81cf984effb2f621da0b338823":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_27e0a306b76d4ec4a50290ccfdd7755a","IPY_MODEL_539f802d01ef45ecb3c750616d36f3e8","IPY_MODEL_057558df75e947a8af2e5a4538d3c26c"],"layout":"IPY_MODEL_ae265553fe7d4b33b131c6d46ce02f44"}},"27e0a306b76d4ec4a50290ccfdd7755a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_
model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_00dd28179d6a41d8812aafda5135507c","placeholder":"โ€‹","style":"IPY_MODEL_5cb1be0cd24141f1a32e86b9ea09ec86","value":"special_tokens_map.json:โ€‡100%"}},"539f802d01ef45ecb3c750616d36f3e8":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_282fa42f92e84c4c9a2a886d5d85df4a","max":125,"min":0,"orientation":"horizontal","style":"IPY_MODEL_54382bef09384ff29564c3d7b6c5ea89","value":125}},"057558df75e947a8af2e5a4538d3c26c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e6d8e6a9d954452bb8adb8d3d101939d","placeholder":"โ€‹","style":"IPY_MODEL_5df80152d588453ea46ae6c73eac0a19","value":"โ€‡125/125โ€‡[00:00<00:00,โ€‡11.9kB/s]"}},"ae265553fe7d4b33b131c6d46ce02f44":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0"
,"_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"00dd28179d6a41d8812aafda5135507c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5cb1be0cd24141f1a32e86b9ea09ec86":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"Descrip
tionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"282fa42f92e84c4c9a2a886d5d85df4a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"54382bef09384ff29564c3d7b6c5ea89":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e6d8e6a9d954452bb8adb8d3d101939d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_it
ems":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5df80152d588453ea46ae6c73eac0a19":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"105d74d437fd4d19a2beab025055b969":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_58679b3e995b413298ccceba14c5cee5","IPY_MODEL_702bc5c5d42a48faa14ee2a661471b2c","IPY_MODEL_827b227da03a4f39b4eebf2ebca7f147"],"layout":"IPY_MODEL_3b03d89f230747c799418596fdd038cb"}},"58679b3e995b413298ccceba14c5cee5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_
version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_c6e4f05eceea4daa9f9ed046faa160f6","placeholder":"โ€‹","style":"IPY_MODEL_42e89191795b4c82b3b3970f638950ae","value":"config.json:โ€‡100%"}},"702bc5c5d42a48faa14ee2a661471b2c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3108cef227b24e0499f26f823e89d1f3","max":190,"min":0,"orientation":"horizontal","style":"IPY_MODEL_f883d8d9f738476887c97c94dcf184df","value":190}},"827b227da03a4f39b4eebf2ebca7f147":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_999f8bcc0b1d4083922105f549167f1b","placeholder":"โ€‹","style":"IPY_MODEL_8a1597c0aaa84bb29042275aae7de6f2","value":"โ€‡190/190โ€‡[00:00<00:00,โ€‡18.3kB/s]"}},"3b03d89f230747c799418596fdd038cb":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_col
umns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c6e4f05eceea4daa9f9ed046faa160f6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"42e89191795b4c82b3b3970f638950ae":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3108cef227b24e0499f26f823e89d1f3
":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f883d8d9f738476887c97c94dcf184df":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"999f8bcc0b1d4083922105f549167f1b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"g
rid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8a1597c0aaa84bb29042275aae7de6f2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/NIRANJAN_SINGH/README.md b/NIRANJAN_SINGH/README.md deleted file mode 100644 index 541f33b..0000000 --- a/NIRANJAN_SINGH/README.md +++ /dev/null @@ -1 +0,0 @@ -# NIRANJAN_SINGH diff --git a/Nandini_Reddy/Assignment.txt b/Nandini_Reddy/Assignment.txt deleted file mode 100644 index c6e4831..0000000 --- a/Nandini_Reddy/Assignment.txt +++ /dev/null @@ -1,17 +0,0 @@ -Day1 -1.Build a chat completion model using code completion API and Open AI API key - -Day2: -1.Build a text summarizer in python using Hugging face, Gradio - -Day3: -1.Build a chat model with additional features using streamlit - -Day4: -1.Build a linkedin post generator using n8n - -Day5: -1.Build a linkedin post AI automation to post content in linkedin using n8n, Bolt - -Day6: -1.Build a RAG system with Ollama Gemma:1B, LanceDB , LlamaIndex diff --git a/Nandini_Reddy/Day2/copy_of_untitled0.py b/Nandini_Reddy/Day2/copy_of_untitled0.py deleted file mode 100644 index 08c76d3..0000000 --- a/Nandini_Reddy/Day2/copy_of_untitled0.py +++ /dev/null @@ -1,116 +0,0 @@ -# -*- coding: 
utf-8 -*- -"""Copy of Untitled0.ipynb - -Automatically generated by Colab. - -Original file is located at - https://colab.research.google.com/drive/1XYlZfuisho_ZDYC46N1JPnjcKIKBbo0h - -GRADIO SYNTAX -""" - - - -import gradio as gr - -def greet(name): - return "Hello, " + name + "!" - -demo = gr.Interface(fn=greet, inputs="text", outputs="text") - -# To create a shareable link (valid for 72 hours) -demo.launch(share=True) - -"""HUGGING FACE TOKEN LOGISTICS""" - -!pip install huggingface_hub - -from huggingface_hub import whoami -from google.colab import userdata - -# Get your Hugging Face token from Colab Secrets -hf_token = userdata.get('HF_TOKEN') - -# Verify the token by checking your identity -try: - user_info = whoami(token=hf_token) - print(f"Logged in as: {user_info['name']}") -except Exception as e: - print(f"Could not log in: {e}") - print("Please make sure you have added your Hugging Face token to Colab Secrets with the name 'HF_TOKEN'") - -"""IF YOU WISH TO LOAD SOME DATASET TO TEST ANYTHING""" - -from datasets import load_dataset - -# Load a dataset (e.g., the SQuAD dataset for question answering) -dataset = load_dataset("squad") - -# Print information about the dataset -print(dataset) - -# Access an example from the training set -print("\nExample from the training set:") -print(dataset["train"][0]) - -"""SAMPLE SUMMARISATION CODE""" - -from transformers import pipeline - -# Load the summarization pipeline -summarizer = pipeline("summarization") - -# Text to summarize -text = """ -Hugging Face is a company and open-source platform that provides tools and models for natural language processing (NLP). It has become a central hub for the ML community, offering a wide range of pre-trained models that can be easily used or fine-tuned for specific applications. Key aspects of Hugging Face include the Transformers library, Model Hub, Datasets library, and Tokenizers library. 
Hugging Face democratizes access to powerful ML models, making it easier for developers and researchers to build and deploy applications. -""" - -# Summarize the text -summary = summarizer(text, max_length=50, min_length=25, do_sample=False) - -print("Original Text:") -print(text) -print("\nSummary:") -print(summary[0]['summary_text']) - -"""ASSIGNMENT GOES BELOW -""" - -# text_summarizer_app.py - -import gradio as gr -from transformers import pipeline - -summarizer = pipeline("summarization", model="facebook/bart-large-cnn", token="",device=0) - -def summarize_text(text): - summary = summarizer(text, max_length=150, min_length=30, do_sample=False)[0]["summary_text"] - - output_file = "summary_output.txt" - with open(output_file, "w", encoding="utf-8") as f: - f.write(summary) - return summary, output_file - -# 4๏ธโƒฃ Create Gradio Interface -with gr.Blocks(title="Text Summarizer") as demo: - - with gr.Row(): - text_input = gr.Textbox( - label="Enter your text here" - ) - - summarize_btn = gr.Button("Summarize") - output_text = gr.Textbox(label="Summary Output") - export_file = gr.File(label="Download Summary") - - summarize_btn.click(summarize_text, inputs=text_input, outputs=[output_text, export_file]) - - # Add light/dark theme toggle - theme_toggle = gr.Radio( - ["Light", "Dark"], - label="Select Theme", - value="Light" - ) - -# 5๏ธโƒฃ Launch app -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/Nandini_Reddy/Day3/app.py b/Nandini_Reddy/Day3/app.py deleted file mode 100644 index f4f4528..0000000 --- a/Nandini_Reddy/Day3/app.py +++ /dev/null @@ -1,282 +0,0 @@ -""" -Example 5: Complete Streamlit Application - -Key Teaching Points: -- Putting together all concepts: session state, chat interface, sidebar -- Professional app structure and organization -- Error handling and user feedback -- Production-ready patterns -""" - -import streamlit as st -import time -import random -from datetime import datetime - -# Page configuration 
-st.set_page_config( - page_title="Complete Streamlit Demo", - page_icon="๐Ÿš€", - layout="wide", - initial_sidebar_state="expanded" -) - -# Initialize session state -def initialize_session_state(): - """Initialize all session state variables with defaults""" - defaults = { - "messages": [{"role": "assistant", "content": "Hello! I'm your demo assistant. How can I help you today?"}], - "settings": { - "assistant_name": "Demo Assistant", - "response_style": "Friendly", - "max_history": 50, - "show_timestamps": True - }, - "stats": { - "total_messages": 0, - "session_start": datetime.now() - } - } - - for key, value in defaults.items(): - if key not in st.session_state: - st.session_state[key] = value - -# Initialize app -initialize_session_state() - -# Helper functions -def add_message(role, content): - """Add a message to chat history with timestamp""" - message = { - "role": role, - "content": content, - "timestamp": datetime.now() - } - st.session_state.messages.append(message) - - # Trim history if too long - max_history = st.session_state.settings["max_history"] - if len(st.session_state.messages) > max_history: - # Keep first message (greeting) and trim from the middle - st.session_state.messages = [st.session_state.messages[0]] + st.session_state.messages[-(max_history-1):] - -def generate_response(user_input): - """Generate a demo response based on settings""" - style = st.session_state.settings["response_style"] - - if style == "Professional": - responses = [ - f"Thank you for your message regarding '{user_input}'. I've processed your request and understand your query.", - f"I acknowledge your input: '{user_input}'. Please allow me to provide you with a comprehensive response.", - f"Your inquiry about '{user_input}' has been noted. I'm here to assist you with professional guidance." - ] - elif style == "Creative": - responses = [ - f"๐ŸŽจ Wow! '{user_input}' - that sparks so many creative possibilities! 
Let me paint you a picture with words...", - f"โœจ Your message '{user_input}' is like a canvas waiting for artistic interpretation! Here's my creative take...", - f"๐ŸŒŸ '{user_input}' - what an inspiring prompt! Let me weave some creative magic around that idea..." - ] - else: # Friendly - responses = [ - f"That's really interesting! You mentioned '{user_input}' and I think that's a great topic to explore together! ๐Ÿ˜Š", - f"I love that you brought up '{user_input}'! It's always exciting to chat about new things. Let me share my thoughts!", - f"Hey, great question about '{user_input}'! I'm happy to help you with that. Here's what I'm thinking..." - ] - - return random.choice(responses) - -# Sidebar Configuration -with st.sidebar: - st.header("๐ŸŽ›๏ธ Configuration") - - # Assistant settings - st.subheader("Assistant Settings") - assistant_name = st.text_input( - "Assistant Name:", - value=st.session_state.settings["assistant_name"] - ) - - response_style = st.selectbox( - "Response Style:", - ["Friendly", "Professional", "Creative"], - index=["Friendly", "Professional", "Creative"].index(st.session_state.settings["response_style"]) - ) - - # Chat settings - st.subheader("Chat Settings") - max_history = st.slider( - "Max Chat History:", - min_value=10, - max_value=100, - value=st.session_state.settings["max_history"], - help="Maximum number of messages to keep in chat history" - ) - - show_timestamps = st.checkbox( - "Show Timestamps", - value=st.session_state.settings["show_timestamps"] - ) - - # Update settings - st.session_state.settings.update({ - "assistant_name": assistant_name, - "response_style": response_style, - "max_history": max_history, - "show_timestamps": show_timestamps - }) - - st.divider() - - # Statistics - st.subheader("๐Ÿ“Š Session Stats") - session_duration = datetime.now() - st.session_state.stats["session_start"] - st.metric("Session Duration", f"{session_duration.seconds // 60}m {session_duration.seconds % 60}s") - st.metric("Messages 
Sent", st.session_state.stats["total_messages"]) - st.metric("Total Messages", len(st.session_state.messages)) - - st.divider() - - # Actions - st.subheader("๐Ÿ”ง Actions") - col1, col2 = st.columns(2) - - with col1: - if st.button("๐Ÿ—‘๏ธ Clear Chat", type="secondary"): - st.session_state.messages = [ - {"role": "assistant", "content": f"Hello! I'm {assistant_name}. Chat cleared - let's start fresh!"} - ] - st.rerun() - - with col2: - if st.button("๐Ÿ“ค Export Chat", type="secondary"): - chat_export = f"Chat Export - {datetime.now().strftime('%Y-%m-%d %H:%M')}\n" - chat_export += "=" * 50 + "\n\n" - - for msg in st.session_state.messages: - role = "You" if msg["role"] == "user" else assistant_name - timestamp = msg.get("timestamp", datetime.now()).strftime("%H:%M") - chat_export += f"[{timestamp}] {role}: {msg['content']}\n\n" - - st.download_button( - "๐Ÿ’พ Download", - chat_export, - file_name=f"chat_export_{datetime.now().strftime('%Y%m%d_%H%M')}.txt", - mime="text/plain" - ) - -# Main content area -st.title(f"๐Ÿš€ {assistant_name}") -st.caption(f"Response Style: {response_style} | History Limit: {max_history} messages") - -# Chat display -chat_container = st.container() -with chat_container: - for message in st.session_state.messages: - role_display = "You" if message["role"] == "user" else assistant_name - - with st.chat_message(message["role"]): - if show_timestamps and "timestamp" in message: - timestamp = message["timestamp"].strftime("%H:%M:%S") - st.caption(f"{role_display} - {timestamp}") - - st.write(message["content"]) - -# Chat input -if prompt := st.chat_input(f"Message {assistant_name}..."): - # Add user message - add_message("user", prompt) - st.session_state.stats["total_messages"] += 1 - - # Display user message - with st.chat_message("user"): - if show_timestamps: - st.caption(f"You - {datetime.now().strftime('%H:%M:%S')}") - st.write(prompt) - - # Generate and display assistant response - with st.chat_message("assistant"): - if 
show_timestamps: - st.caption(f"{assistant_name} - {datetime.now().strftime('%H:%M:%S')}") - - # Show typing indicator - with st.spinner(f"{assistant_name} is thinking..."): - time.sleep(random.uniform(0.5, 2.0)) # Realistic delay - - # Generate response - response = generate_response(prompt) - st.write(response) - - # Add assistant response to history - add_message("assistant", response) - - # Rerun to update the display - st.rerun() - -# Footer with helpful info -st.write("---") -with st.expander("โ„น๏ธ About This Demo"): - st.write(f""" - **Complete Streamlit Application Demo** - - This example demonstrates a production-ready Streamlit application with: - - โœ… **Session State Management**: Persistent chat history and settings - โœ… **Professional UI**: Clean layout with sidebar configuration - โœ… **Error Handling**: Graceful handling of edge cases - โœ… **User Feedback**: Loading indicators and confirmation messages - โœ… **Export Functionality**: Download chat history as text file - โœ… **Responsive Design**: Works well on different screen sizes - โœ… **Statistics Tracking**: Session metrics and usage data - - **Architecture Patterns Used:** - - Initialization functions for clean startup - - Helper functions for common operations - - Separation of concerns (UI, logic, data) - - Consistent error handling and user feedback - - Current session: {len(st.session_state.messages)} messages in {max_history} message limit - """) - -# Teaching notes for instructors -with st.expander("๐ŸŽ“ Instructor Notes"): - st.write(""" - **Teaching Points to Emphasize:** - - 1. **Session State Patterns:** - - Initialization with defaults - - Nested dictionaries for organization - - Trimming data to prevent memory issues - - 2. **Professional App Structure:** - - Page configuration at the top - - Initialization functions - - Helper functions for reusable logic - - Consistent naming conventions - - 3. 
**User Experience:** - - Loading indicators during processing - - Confirmation messages for actions - - Export functionality for data portability - - Responsive layout considerations - - 4. **Production Considerations:** - - Memory management (history limits) - - Error handling and graceful failures - - Performance optimization (minimize reruns) - - Accessibility and usability - - **Extension Ideas for Advanced Students:** - - Add user authentication - - Implement persistent storage (database) - - Add real AI model integration - - Multi-page application structure - - Custom CSS styling - - Advanced analytics and metrics - """) - -# Development info (hidden by default) -if st.checkbox("Show Development Info", value=False): - st.write("**Current Session State:**") - st.json({k: v for k, v in dict(st.session_state).items() if k not in ["messages"]}) - st.write(f"**Messages in Memory:** {len(st.session_state.messages)}") \ No newline at end of file diff --git a/Nandini_Reddy/Day3/chatgpt.py b/Nandini_Reddy/Day3/chatgpt.py deleted file mode 100644 index b14d776..0000000 --- a/Nandini_Reddy/Day3/chatgpt.py +++ /dev/null @@ -1,827 +0,0 @@ -# -*- coding: utf-8 -*- -"""ChatGPT-breakout-room-2 .ipynb - -Automatically generated by Colab. - -Original file is located at - https://colab.research.google.com/drive/1tCSjRH5XVEvoOWTcCFvhUqOgeR0aNbZr - -# ChatGPT Like app Via Streamlit -Project Structure would be - - -1. app.py -2. requirements.txt -3. .streamlit > secrets.toml > OPENROUTER_API_KEY goes here -4. 
chat-history (folder) - - -app.py file below (discussed in the class) -""" - -# Multi-chat app with persistent history using OpenRouter -# Features: Multiple conversations, persistent storage, chat history in sidebar - -import streamlit as st -from openai import OpenAI -import os -import json -from datetime import datetime -from pathlib import Path - -# Configure the page -st.set_page_config(page_title="My ChatBot", page_icon="๐Ÿค–", layout="wide") - -# Initialize the OpenAI client with OpenRouter -try: - api_key = st.secrets["OPENROUTER_API_KEY"] -except Exception: - st.error("OPENROUTER_API_KEY not found in .streamlit/secrets.toml") - st.stop() - -client = OpenAI( - base_url="https://openrouter.ai/api/v1", - api_key=api_key, - default_headers={ - "HTTP-Referer": "http://localhost:8504", - "X-Title": "My ChatBot", - } -) - -# Setup persistent storage directory -CHAT_STORAGE_DIR = Path(__file__).parent / "chat_history" -CHAT_STORAGE_DIR.mkdir(exist_ok=True) - -# ============================================================================ -# CHAT PERSISTENCE FUNCTIONS -# ============================================================================ - -def get_all_chats(): - """Get all chat files sorted by modification time (newest first)""" - chat_files = list(CHAT_STORAGE_DIR.glob("chat_*.json")) - chat_files.sort(key=lambda x: x.stat().st_mtime, reverse=True) - return chat_files - -def load_chat(chat_id): - """Load a specific chat by ID""" - chat_file = CHAT_STORAGE_DIR / f"chat_{chat_id}.json" - if chat_file.exists(): - with open(chat_file, 'r', encoding='utf-8') as f: - data = json.load(f) - return data - return None - -def save_chat(chat_id, messages, title=None): - """Save chat to disk""" - chat_file = CHAT_STORAGE_DIR / f"chat_{chat_id}.json" - - # Auto-generate title from first user message if not provided - if title is None and messages: - for msg in messages: - if msg["role"] == "user": - title = msg["content"][:50] + ("..." 
if len(msg["content"]) > 50 else "") - break - - if title is None: - title = "New Chat" - - data = { - "chat_id": chat_id, - "title": title, - "messages": messages, - "created_at": datetime.now().isoformat(), - "updated_at": datetime.now().isoformat() - } - - # If file exists, preserve created_at - if chat_file.exists(): - with open(chat_file, 'r', encoding='utf-8') as f: - old_data = json.load(f) - data["created_at"] = old_data.get("created_at", data["created_at"]) - - with open(chat_file, 'w', encoding='utf-8') as f: - json.dump(data, f, ensure_ascii=False, indent=2) - -def delete_chat(chat_id): - """Delete a chat file""" - chat_file = CHAT_STORAGE_DIR / f"chat_{chat_id}.json" - if chat_file.exists(): - chat_file.unlink() - -def create_new_chat(): - """Create a new chat with unique ID""" - chat_id = datetime.now().strftime("%Y%m%d_%H%M%S_%f") - return chat_id - -def get_chat_title(chat_data): - """Extract chat title from chat data""" - return chat_data.get("title", "Untitled Chat") - -# ============================================================================ -# SESSION STATE INITIALIZATION -# ============================================================================ - -# Initialize current chat ID -if "current_chat_id" not in st.session_state: - # Try to load the most recent chat, or create new one - all_chats = get_all_chats() - if all_chats: - latest_chat = load_chat(all_chats[0].stem.replace("chat_", "")) - st.session_state.current_chat_id = latest_chat["chat_id"] - st.session_state.messages = latest_chat["messages"] - st.session_state.chat_title = latest_chat["title"] - else: - st.session_state.current_chat_id = create_new_chat() - st.session_state.messages = [] - st.session_state.chat_title = "New Chat" - -# Initialize messages -if "messages" not in st.session_state: - st.session_state.messages = [] - -# Initialize chat title -if "chat_title" not in st.session_state: - st.session_state.chat_title = "New Chat" - -# Initialize feedback -if "feedback" not 
in st.session_state: - st.session_state.feedback = {} - -# Initialize dark mode -if "dark_mode" not in st.session_state: - st.session_state.dark_mode = True - -# ============================================================================ -# SIDEBAR: CHAT MANAGEMENT -# ============================================================================ - -with st.sidebar: - st.header("๐Ÿ’ฌ Conversations") - - # New Chat button - if st.button("โž• New Chat", use_container_width=True, type="primary"): - # Save current chat before creating new one - if st.session_state.messages: - save_chat( - st.session_state.current_chat_id, - st.session_state.messages, - st.session_state.chat_title - ) - - # Create new chat - st.session_state.current_chat_id = create_new_chat() - st.session_state.messages = [] - st.session_state.chat_title = "New Chat" - st.session_state.feedback = {} - st.rerun() - - st.divider() - - # List all chats - st.subheader("Chat History") - all_chats = get_all_chats() - - if all_chats: - for chat_file in all_chats: - chat_id = chat_file.stem.replace("chat_", "") - chat_data = load_chat(chat_id) - - if chat_data: - chat_title = get_chat_title(chat_data) - is_current = chat_id == st.session_state.current_chat_id - - col1, col2 = st.columns([4, 1]) - - with col1: - # Show current chat with indicator - button_label = f"{'๐ŸŸข ' if is_current else ''}{chat_title}" - if st.button( - button_label, - key=f"load_{chat_id}", - use_container_width=True, - disabled=is_current, - type="secondary" if is_current else "tertiary" - ): - # Save current chat before switching - if st.session_state.messages: - save_chat( - st.session_state.current_chat_id, - st.session_state.messages, - st.session_state.chat_title - ) - - # Load selected chat - st.session_state.current_chat_id = chat_id - st.session_state.messages = chat_data["messages"] - st.session_state.chat_title = chat_title - st.session_state.feedback = {} - st.rerun() - - with col2: - # Delete button (only for non-current 
chats or if it's the only chat) - if st.button("๐Ÿ—‘๏ธ", key=f"delete_{chat_id}", help="Delete chat"): - delete_chat(chat_id) - - # If we deleted the current chat, switch to another or create new - if chat_id == st.session_state.current_chat_id: - remaining_chats = [c for c in all_chats if c.stem.replace("chat_", "") != chat_id] - if remaining_chats: - new_chat_data = load_chat(remaining_chats[0].stem.replace("chat_", "")) - st.session_state.current_chat_id = new_chat_data["chat_id"] - st.session_state.messages = new_chat_data["messages"] - st.session_state.chat_title = new_chat_data["title"] - else: - st.session_state.current_chat_id = create_new_chat() - st.session_state.messages = [] - st.session_state.chat_title = "New Chat" - st.session_state.feedback = {} - - st.rerun() - else: - st.info("No chat history yet. Start a new conversation!") - - st.divider() - - # Settings section - st.subheader("โš™๏ธ Settings") - dark_mode = st.toggle("Dark mode", value=st.session_state.dark_mode) - st.session_state.dark_mode = dark_mode - - # Clear current chat - if st.button("๐Ÿ—‘๏ธ Clear Current Chat", use_container_width=True): - st.session_state.messages = [] - st.session_state.feedback = {} - st.session_state.chat_title = "New Chat" - save_chat(st.session_state.current_chat_id, [], "New Chat") - st.rerun() - -# ============================================================================ -# APPLY THEMING -# ============================================================================ - -if st.session_state.dark_mode: - st.markdown( - """ - - """, - unsafe_allow_html=True, - ) - -# ============================================================================ -# MAIN CHAT INTERFACE -# ============================================================================ - -# App title with current chat title -st.title(f"๐Ÿค– {st.session_state.chat_title}") - -# Summarize conversation - for entire current chat -with st.expander("๐Ÿ“ Summarize Conversation", expanded=False): - 
st.write("Generate a summary of the entire conversation in this chat") - - if st.button("Generate Summary", use_container_width=True): - if not st.session_state.messages: - st.warning("No messages to summarize yet!") - else: - try: - with st.spinner("Generating summary..."): - summary_resp = client.chat.completions.create( - model="openai/gpt-oss-120b", - messages=[ - {"role": "system", "content": "Summarize the conversation into concise key points and action items."}, - *st.session_state.messages, - ], - stream=False, - extra_body={} - ) - summary_text = summary_resp.choices[0].message.content.strip() - st.markdown("### Summary") - st.markdown(summary_text) - except Exception as e: - st.error(f"Summary failed: {e}") - -# Display chat history -for idx, message in enumerate(st.session_state.messages): - with st.chat_message(message["role"]): - st.markdown(message["content"]) - if message["role"] == "assistant": - c1, c2 = st.columns([1, 1]) - with c1: - if st.button("๐Ÿ‘", key=f"up_{idx}"): - st.session_state.feedback[idx] = "up" - with c2: - if st.button("๐Ÿ‘Ž", key=f"down_{idx}"): - st.session_state.feedback[idx] = "down" - -# Handle user input -if prompt := st.chat_input("What would you like to know?"): - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - - # Update chat title if this is the first message - if len(st.session_state.messages) == 1: - st.session_state.chat_title = prompt[:50] + ("..." 
if len(prompt) > 50 else "") - - # Display user message - with st.chat_message("user"): - st.markdown(prompt) - - # Generate AI response - with st.chat_message("assistant"): - try: - response = client.chat.completions.create( - model="openai/gpt-oss-120b", - messages=st.session_state.messages, - stream=True, - extra_headers={ - "HTTP-Referer": "http://localhost:8503", - "X-Title": "My ChatBot" - }, - extra_body={} - ) - - # Stream the response - response_text = "" - response_placeholder = st.empty() - - for chunk in response: - if chunk.choices[0].delta.content is not None: - # Clean up unwanted tokens - content = chunk.choices[0].delta.content - content = ( - content.replace('', '') - .replace('<|im_start|>', '') - .replace('<|im_end|>', '') - .replace("<|OUT|>", "") - ) - response_text += content - response_placeholder.markdown(response_text + "โ–Œ") - - # Final cleanup of response text - response_text = ( - response_text.replace('', '') - .replace('<|im_start|>', '') - .replace('<|im_end|>', '') - .replace("<|OUT|>", "") - .strip() - ) - response_placeholder.markdown(response_text) - - # Add assistant response to chat history - st.session_state.messages.append( - {"role": "assistant", "content": response_text} - ) - - # Save chat to disk - save_chat( - st.session_state.current_chat_id, - st.session_state.messages, - st.session_state.chat_title - ) - - except Exception as e: - st.error(f"Error: {str(e)}") - st.info("Please check your API key and try again.") - -# Auto-save chat when messages change (backup mechanism) -if st.session_state.messages: - save_chat( - st.session_state.current_chat_id, - st.session_state.messages, - st.session_state.chat_title - ) - -"""--- - - - ---- - -# Challenge 1 - -# Challenge 1: Translation Mode - -**Difficulty**: Intermediate -**Estimated Time**: 30-40 minutes -**Focus**: System prompts, multiple API calls, language detection - -## Challenge Description - -Transform your basic chatbot into an intelligent translation assistant 
that can automatically detect languages and provide high-quality translations with cultural context. - -## User Story - -*"As a user, I want to type text in any language and have the chatbot automatically detect the language and offer to translate it to my preferred target language. The bot should also provide cultural context and alternative translations when relevant."* - -## Requirements - -### Core Features (Must Have) -- [x] **Language Detection**: Automatically identify the input language -- [x] **Translation**: Translate text to user-selected target language -- [x] **Language Selection**: Sidebar control for target language selection -- [x] **Bidirectional Translation**: Support translation in both directions - -### Advanced Features (Nice to Have) -- [x] **Cultural Context**: Provide cultural notes for idiomatic expressions -- [x] **Alternative Translations**: Offer multiple translation options -- [x] **Confidence Scoring**: Show confidence level of detection/translation -- [x] **Translation History**: Keep track of translation pairs - -## Technical Approach - -You'll need to modify your existing chatbot to: - -1. **System Prompt Engineering**: Create a specialized prompt for translation tasks -2. **Two-Stage Process**: First detect language, then translate -3. **State Management**: Track source/target languages in session state -4. **UI Enhancements**: Add language selection controls - -## Key Learning Objectives - -- Master system prompt engineering for specialized tasks -- Handle multiple API calls in sequence -- Implement conditional logic based on AI responses -- Create professional translation UX patterns - -## Getting Started - -1. Copy your working chatbot from the main workshop -2. Add language selection to the sidebar -3. Modify the system prompt for translation tasks -4. Implement the two-stage detection + translation process - -## Example Interactions - -**Input**: "Bonjour, comment allez-vous?" 
-**Output**: -``` -๐Ÿ” Detected Language: French -๐ŸŽฏ Translation (English): "Hello, how are you?" - -๐Ÿ’ก Cultural Note: This is a formal greeting in French. In casual settings, - you might hear "Salut, รงa va?" instead. -``` - -**Input**: "I love this weather" -**Output**: -``` -๐Ÿ” Detected Language: English -๐ŸŽฏ Translation (Spanish): "Me encanta este clima" - -๐ŸŒŸ Alternative: "Adoro este tiempo" (more emphatic) -๐Ÿ’ก Regional Note: In Mexico, you might also hear "estรก padrรญsimo el clima" -``` - -## Hints Available - -- Progressive hints in `hints.md` -- Complete solution in `solution.py` -- Don't peek at the solution until you've tried for at least 20 minutes! - -## Success Criteria - -Your translation bot should: -โœ… Automatically detect input language -โœ… Translate accurately to target language -โœ… Provide cultural context when relevant -โœ… Handle errors gracefully -โœ… Maintain conversation history -โœ… Have intuitive language selection UI - -## Extension Ideas - -- Add support for document translation -- Implement translation confidence scoring -- Create translation glossaries for technical terms -- Add pronunciation guides -- Support batch translation of multiple texts - -Good luck! Remember, the goal is to learn the patterns - don't be afraid to experiment and iterate. - ---- - - - ---- - -# Challenge 2 - -# Challenge 2: Personality Selector - -**Difficulty**: Beginner-Intermediate -**Estimated Time**: 25-35 minutes -**Focus**: Session state management, system prompts, UI controls - -## Challenge Description - -Transform your chatbot into a versatile assistant with multiple personalities that users can select from. Each personality should have a distinct communication style, expertise area, and approach to helping users. - -## User Story - -*"As a user, I want to choose from different AI personality modes (like Professional, Creative, Technical, etc.) 
so that I get responses that match the style and expertise I need for my current task."* - -## Requirements - -### Core Features (Must Have) -- [x] **Personality Selection**: Sidebar dropdown with at least 4 personality options -- [x] **Dynamic System Prompts**: Each personality uses a different system prompt -- [x] **Visual Indicators**: Show current personality in the chat interface -- [x] **Personality Persistence**: Remember selected personality across conversation - -### Advanced Features (Nice to Have) -- [x] **Custom Personality**: Allow users to define their own personality -- [x] **Personality Descriptions**: Show what each personality is good for -- [x] **Response Style Preview**: Show example responses for each personality -- [x] **Personality Switching**: Allow mid-conversation personality changes - -## Personality Options - -### 1. Professional Business Assistant -- **Style**: Formal, structured, business-focused -- **Expertise**: Business strategy, professional communication -- **Tone**: Polite, efficient, results-oriented - -### 2. Creative Writing Helper -- **Style**: Imaginative, expressive, inspiring -- **Expertise**: Creative writing, storytelling, artistic projects -- **Tone**: Enthusiastic, artistic, encouraging - -### 3. Technical Expert -- **Style**: Precise, detailed, code-focused -- **Expertise**: Programming, technology, problem-solving -- **Tone**: Analytical, methodical, educational - -### 4. Friendly Companion -- **Style**: Casual, supportive, conversational -- **Expertise**: General chat, emotional support, casual advice -- **Tone**: Warm, empathetic, encouraging - -### 5. Custom Personality (Advanced) -- **Style**: User-defined -- **Expertise**: User-specified -- **Tone**: Customizable - -## Technical Approach - -You'll modify your existing chatbot to: - -1. **Add Personality State**: Track selected personality in session state -2. **System Prompt Engineering**: Create distinct prompts for each personality -3. 
**UI Enhancement**: Add sidebar controls for personality selection -4. **Message Context**: Inject personality context into conversation - -## Key Learning Objectives - -- Master system prompt engineering for different use cases -- Implement dynamic behavior based on user selections -- Create intuitive personality switching UX -- Understand how context shapes AI responses - -## Getting Started - -1. Copy your working chatbot from the main workshop -2. Add personality selection to the sidebar -3. Create system prompts for each personality -4. Implement dynamic prompt switching - -## Example Interactions - -**Professional Mode**: -``` -User: "How do I handle a difficult client meeting?" -Assistant: "I recommend a structured approach to managing challenging client interactions: - -1. Preparation: Review all relevant documentation beforehand -2. Active Listening: Allow the client to express their concerns fully -3. Solution-Focused Discussion: Present 2-3 concrete options -4. Clear Next Steps: Document agreed actions with timelines - -Would you like me to elaborate on any of these strategies?" -``` - -**Creative Mode**: -``` -User: "How do I handle a difficult client meeting?" -Assistant: "Ah, the art of navigating stormy client waters! ๐ŸŽญ - -Picture this: You're not just attending a meeting, you're crafting a story where everyone wins. Start by becoming a master listener - let their frustrations paint the full picture. Then, like a skilled storyteller, weave their concerns into solutions that feel like plot twists they never saw coming! - -Transform the tension into creative energy. What if this 'difficult' meeting becomes the turning point where you surprise them with innovative approaches they hadn't considered? 
โœจ" -``` - -## Success Criteria - -Your personality selector should: -โœ… Offer at least 4 distinct personalities -โœ… Show clear differences in response style -โœ… Maintain personality consistency throughout conversation -โœ… Allow personality switching mid-conversation -โœ… Display current personality clearly to users -โœ… Handle personality changes gracefully - -## Extension Ideas - -- Add personality-specific conversation starters -- Implement personality learning (adapt based on user feedback) -- Create personality profiles with detailed descriptions -- Add visual avatars for each personality -- Enable personality mixing (combine traits from multiple personalities) - -Remember: The goal is to understand how system prompts shape AI behavior. Experiment with different prompt styles and see how dramatically they change the responses! - ---- - - - ---- - - - ---- - -# Challenge 3 - -# Challenge 3: Export Functionality - -**Difficulty**: Intermediate-Advanced -**Estimated Time**: 35-45 minutes -**Focus**: Data formatting, file operations, download handling - -## Challenge Description - -Add comprehensive export functionality to your chatbot that allows users to download their conversation history in multiple formats with rich metadata and formatting options. 
- -## User Story - -*"As a user, I want to export my conversation history in different formats (TXT, JSON, CSV) so I can save important discussions, share them with others, or import them into other tools for analysis."* - -## Requirements - -### Core Features (Must Have) -- [x] **Multiple Export Formats**: TXT, JSON, and CSV export options -- [x] **Download Functionality**: Use `st.download_button` for direct downloads -- [x] **Formatted Output**: Clean, readable formatting for each format -- [x] **Metadata Inclusion**: Timestamps, message counts, session info - -### Advanced Features (Nice to Have) -- [x] **Export Filtering**: Allow users to export specific date ranges or message types -- [x] **Rich Text Formatting**: Markdown preservation, code blocks, special formatting -- [x] **Statistics Summary**: Include conversation analytics in exports -- [x] **Batch Export**: Export multiple conversations or sessions - -## Export Format Specifications - -### 1. TXT Format (Human Readable) -``` -Chat Export - 2024-01-15 14:30 -======================================== - -Session Information: -- Total Messages: 12 -- Duration: 25 minutes -- Export Date: 2024-01-15 14:55 - -Conversation: ----------------------------------------- - -[14:30:15] You: Hello! How can I help you today? - -[14:30:22] Assistant: Hello! I'm here to help you with any questions or tasks you have. What would you like to discuss? - -[14:30:45] You: Can you explain machine learning? - -[14:31:02] Assistant: Machine learning is a subset of artificial intelligence... -``` - -### 2. JSON Format (Structured Data) -```json -{ - "export_metadata": { - "export_timestamp": "2024-01-15T14:55:30Z", - "format_version": "1.0", - "session_id": "session_123", - "total_messages": 12, - "session_duration_minutes": 25 - }, - "conversation": [ - { - "timestamp": "2024-01-15T14:30:15Z", - "role": "user", - "content": "Hello! 
How can I help you today?", - "message_id": 1, - "character_count": 32 - }, - { - "timestamp": "2024-01-15T14:30:22Z", - "role": "assistant", - "content": "Hello! I'm here to help you...", - "message_id": 2, - "character_count": 95 - } - ], - "statistics": { - "user_messages": 6, - "assistant_messages": 6, - "total_characters": 2847, - "average_message_length": 237 - } -} -``` - -### 3. CSV Format (Data Analysis) -```csv -Message_ID,Timestamp,Role,Content,Character_Count,Word_Count -1,2024-01-15 14:30:15,user,"Hello! How can I help you today?",32,8 -2,2024-01-15 14:30:22,assistant,"Hello! I'm here to help you...",95,18 -``` - -## Technical Approach - -You'll need to implement: - -1. **Data Processing**: Convert session state messages to exportable formats -2. **File Generation**: Create properly formatted files in memory -3. **Download Interface**: Use Streamlit's download functionality -4. **Export Options**: Allow users to customize what gets exported - -## Key Learning Objectives - -- Master data transformation and formatting -- Implement file generation and download patterns -- Create user-friendly export interfaces -- Handle different data formats and their use cases - -## Getting Started - -1. Copy your working chatbot from the main workshop -2. Add export controls to the sidebar -3. Implement basic TXT export first -4. 
Add JSON and CSV formats progressively - -## Example Implementation Structure - -```python -def export_as_txt(messages, metadata): - '''Convert messages to formatted text''' - # Implementation here - return text_content - -def export_as_json(messages, metadata): - '''Convert messages to structured JSON''' - # Implementation here - return json_content - -def export_as_csv(messages, metadata): - '''Convert messages to CSV format''' - # Implementation here - return csv_content - -# In sidebar -if st.button("๐Ÿ“ค Export Chat"): - if format_choice == "TXT": - content = export_as_txt(st.session_state.messages, metadata) - st.download_button("๐Ÿ’พ Download TXT", content, "chat.txt") -``` - -## Success Criteria - -Your export system should: -โœ… Generate clean, readable exports in all three formats -โœ… Include relevant metadata and timestamps -โœ… Handle edge cases (empty conversations, special characters) -โœ… Provide intuitive export options and filters -โœ… Generate properly formatted files for each format -โœ… Calculate and include conversation statistics - -## Extension Ideas - -- **Email Integration**: Send exports via email -- **Cloud Storage**: Upload exports to Google Drive, Dropbox -- **Template System**: Custom export templates -- **Compression**: ZIP multiple formats together -- **Scheduling**: Automatic periodic exports -- **Import Functionality**: Import previous conversations - -## Format-Specific Considerations - -### TXT Format -- Human-readable timestamps -- Clear conversation flow -- Preserve formatting and line breaks -- Include conversation statistics - -### JSON Format -- Valid JSON structure -- Rich metadata -- Extensible for future features -- Machine-readable timestamps - -### CSV Format -- Proper escaping of commas and quotes -- Headers for data analysis tools -- Consistent data types -- Easy import into spreadsheets - -Remember: This challenge is about data handling and user experience. 
Focus on creating exports that are genuinely useful for different purposes! -""" \ No newline at end of file diff --git a/Nandini_Reddy/Day3/requirements.txt b/Nandini_Reddy/Day3/requirements.txt deleted file mode 100644 index ddc2f62..0000000 Binary files a/Nandini_Reddy/Day3/requirements.txt and /dev/null differ diff --git a/Nandini_Reddy/Day4/Linked_Job_Automation.json b/Nandini_Reddy/Day4/Linked_Job_Automation.json deleted file mode 100644 index 7e2cdc1..0000000 --- a/Nandini_Reddy/Day4/Linked_Job_Automation.json +++ /dev/null @@ -1,576 +0,0 @@ -{ - "name": "Linked_Job_Automation", - "nodes": [ - { - "parameters": {}, - "type": "n8n-nodes-base.manualTrigger", - "typeVersion": 1, - "position": [ - -560, - 64 - ], - "id": "cabffb0b-3721-49fb-ad4b-e292e796a5ba", - "name": "When clicking โ€˜Execute workflowโ€™" - }, - { - "parameters": { - "url": "https://rss.app/feeds/yZvjeAV1rx2jWW1V.xml", - "options": {} - }, - "type": "n8n-nodes-base.rssFeedRead", - "typeVersion": 1.1, - "position": [ - -256, - 96 - ], - "id": "f8a2318b-9abd-4a34-9542-325bc8195fd9", - "name": "RSS Read" - }, - { - "parameters": { - "url": "={{ $('RSS Read').item.json.link }}", - "options": {} - }, - "type": "n8n-nodes-base.httpRequest", - "typeVersion": 4.2, - "position": [ - 176, - 96 - ], - "id": "eb71e181-a93b-4087-969e-0e6fe6e1258d", - "name": "HTTP Request" - }, - { - "parameters": { - "modelId": { - "__rl": true, - "value": "gpt-4o-mini", - "mode": "list", - "cachedResultName": "GPT-4O-MINI" - }, - "messages": { - "values": [ - { - "content": "You're an intelligent bot capable of pulling out data from a job listing site.", - "role": "system" - }, - { - "content": "=Here's the job site:\n {{ $json.data }}" - }, - { - "content": "Please output the data in JSON format, using the following structure:\n\n{\n\"company_name\": \"\",\n\"benefits\": \"\", //401K plan, medical, etc, please separate by commas\n\"job_description\": \"\", //please make it 200 characters or less\n\"location\": 
\"\"\n}", - "role": "assistant" - } - ] - }, - "jsonOutput": true, - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.openAi", - "typeVersion": 1.8, - "position": [ - 368, - 80 - ], - "id": "c689b6c6-4042-4ec7-beca-17b0bab35a41", - "name": "OpenAI", - "credentials": { - "openAiApi": { - "id": "c0jeJgIzLB3A6YaN", - "name": "OpenAi account" - } - } - }, - { - "parameters": { - "maxItems": 5 - }, - "type": "n8n-nodes-base.limit", - "typeVersion": 1, - "position": [ - -48, - 96 - ], - "id": "c2a4932d-f651-4e74-8dcd-eda381b3411b", - "name": "Limit" - }, - { - "parameters": { - "url": "https://rss.app/feeds/y708YSuVvuVLmHgB.xml", - "options": {} - }, - "type": "n8n-nodes-base.rssFeedRead", - "typeVersion": 1.1, - "position": [ - 1408, - -528 - ], - "id": "f22df730-024d-4b1e-a189-5231b366a3da", - "name": "RSS Read1" - }, - { - "parameters": { - "rule": { - "interval": [ - {} - ] - } - }, - "type": "n8n-nodes-base.scheduleTrigger", - "typeVersion": 1.2, - "position": [ - 720, - -528 - ], - "id": "4ae6bd0e-110b-4ef8-9de8-7001c7cdacb6", - "name": "Schedule Trigger1" - }, - { - "parameters": { - "fieldToSplitOut": "['RSS feeds']", - "options": {} - }, - "type": "n8n-nodes-base.splitOut", - "typeVersion": 1, - "position": [ - 1168, - -528 - ], - "id": "a7c38609-ff46-4614-aed1-db2fe6c572df", - "name": "Split Out" - }, - { - "parameters": { - "operation": "appendOrUpdate", - "documentId": { - "__rl": true, - "value": "1_bNah1H6TnBG9jutjY6pZazso2S4asSL7rBOiAMBtZ8", - "mode": "list", - "cachedResultName": "NR LinkedIn Job Listings", - "cachedResultUrl": "https://docs.google.com/spreadsheets/d/1_bNah1H6TnBG9jutjY6pZazso2S4asSL7rBOiAMBtZ8/edit?usp=drivesdk" - }, - "sheetName": { - "__rl": true, - "value": "gid=0", - "mode": "list", - "cachedResultName": "Sheet1", - "cachedResultUrl": "https://docs.google.com/spreadsheets/d/1_bNah1H6TnBG9jutjY6pZazso2S4asSL7rBOiAMBtZ8/edit#gid=0" - }, - "columns": { - "mappingMode": "autoMapInputData", - "value": {}, - "matchingColumns": [ - 
"Title" - ], - "schema": [ - { - "id": "Title", - "displayName": "Title", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "Job Description", - "displayName": "Job Description", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Link", - "displayName": "Link", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Date", - "displayName": "Date", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Rating", - "displayName": "Rating", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Company Name", - "displayName": "Company Name", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Benefits", - "displayName": "Benefits", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Job Description", - "displayName": "Job Description", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "Cover Letter", - "displayName": "Cover Letter", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true - }, - { - "id": "id", - "displayName": "id", - "required": false, - "defaultMatch": true, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "provider", - "displayName": "provider", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "model", - "displayName": 
"model", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "object", - "displayName": "object", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "created", - "displayName": "created", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "choices", - "displayName": "choices", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "system_fingerprint", - "displayName": "system_fingerprint", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - }, - { - "id": "usage", - "displayName": "usage", - "required": false, - "defaultMatch": false, - "display": true, - "type": "string", - "canBeUsedToMatch": true, - "removed": false - } - ], - "attemptToConvertTypes": false, - "convertFieldsToString": false - }, - "options": {} - }, - "type": "n8n-nodes-base.googleSheets", - "typeVersion": 4.5, - "position": [ - 1424, - 64 - ], - "id": "7fde6ecf-6621-4d29-98a7-1ed6bd57db5a", - "name": "Append or update row in sheet", - "credentials": { - "googleSheetsOAuth2Api": { - "id": "NKSlMTIAsmj9htDq", - "name": "Google Sheets account" - } - } - }, - { - "parameters": { - "modelId": { - "__rl": true, - "value": "gpt-4o-mini", - "mode": "list", - "cachedResultName": "GPT-4O-MINI" - }, - "messages": { - "values": [ - { - "content": "You're an intelligent bot rating how closely a job listing is to a candidates skill set, on a score of 5.\nGive the following points:\na) 3 points for skills matching, 1 point for mostly matching\nb) 1 point for it being the right experience level\nc) 1 point for it being a remote position\nd) 1 point 
if past job experience matches the job role\ne) 1 point if the skills on the resume align with the job\nf) 3 points if I meet the job qualifications posted based on my resume", - "role": "system" - }, - { - "content": "=Here's the job listing details:\n{{ $json.message.content.company_name }}\n{{ $json.message.content.job_description }}\n\nSample resume profile- \n\"\"\"\n### **Profile Overview**\n\nDivij Bajaj is an experienced **Data Scientist** with over **5 years** of hands-on experience in building, scaling, and deploying machine learning models that solve real-world business problems. Your expertise spans various domains, with a focus on **Generative AI**, **cloud technologies**, and **business analytics**. You have a strong ability to bridge the gap between data science and business, using data to drive impactful decisions and improvements in products and processes.\n\n### **Core Competencies**\n\n* **Generative AI**: Experience with cutting-edge generative tools and prompt engineering.\n* **Machine Learning & AI**: Expertise in building and deploying machine learning models using frameworks like **TensorFlow** and **PyTorch**.\n* **Deep Learning**: Strong knowledge in deep learning techniques like **Neural Networks**, **Convolutional Neural Networks (CNN)**, and **Recurrent Neural Networks (RNN)**.\n* **Recommendation Systems**: Experienced in **Collaborative Filtering**, **Matrix Factorization**, and **Dimensionality Reduction** for building scalable recommendation engines.\n* **Cloud Platforms**: Well-versed in **Google Cloud Platform** (GCP), **Azure**, and cloud-based solutions to scale data processing and machine learning models.\n* **Programming**: Proficient in **Python**, **SQL**, and **R**, with a deep understanding of libraries like **Scikit-learn**, **Pandas**, and **NumPy**.\n* **Data Engineering & ETL**: Comfortable with creating and managing data pipelines, ETL processes, and working with large datasets.\n* **Statistical Analysis & A/B 
Testing**: Strong foundation in performing statistical analysis, hypothesis testing, and A/B testing to optimize models and business decisions.\n* **Product Analytics**: Expertise in analyzing user behavior, building systems to improve user experiences, and generating recommendations.\n* **Collaboration**: Able to work effectively with cross-functional teams including engineering, product management, and marketing to derive actionable insights from data.\n\n### **Key Achievements**\n\n* **Awards**: Best Outgoing Student and Batch Topper in **MBA-Data Science & Analytics** (210 students) from **Symbiosis International University**.\n* **Research Excellence**: Awarded **First Prize** for **Best Research Paper** and Presenter at **MIT University**.\n* **Competitions**: Winner of the **First-ever Machine Learning Competition** at **VMware** and **Shark-Tank Finalist**.\n* **Publications**: Contributed notable papers like \"Conversational System, Intelligent Virtual Assistant Named DIVA Using Raspberry Pi\".\n* **Industry Impact**: Built systems that increased **Azure Market Share** and significantly improved **customer targeting** and **marketing campaigns**.\n\n### **Professional Experience**\n\n#### **Microsoft (May 2024 - Present)**\n\n* **Role**: Data & Applied Scientist II, working on **AI-powered solutions** for **EdTech** and **Cloud Operations**.\n* **Key Projects**:\n\n * Anomaly detection and **Root Cause Mitigation Assistant**.\n * Developed a recommendation system for **Azure Subscription Services** to target high-propensity customers and increase market share.\n * **AI/ML techniques** used: **Deep Learning**, **Collaborative Filtering**, **Matrix Factorization**, **A/B Testing**.\n\n#### **VMware (June 2021 - Dec 2021)**\n\n* **Role**: Data Scientist II, focusing on **AI/ML** for marketing and product analytics.\n* **Key Contributions**:\n\n * Developed recommendation systems to improve **customer engagement** and optimize marketing campaigns.\n * Used 
**Markov Chains** and **Apriori Algorithm** for product recommendations and **NLG** for insights generation.\n * Impacted business metrics by optimizing **campaign targeting** and **ROI**.\n\n#### **Hyper Filteration Pvt. Ltd. (June 2017 - June 2018)**\n\n* **Role**: Embedded Software Engineer, working on **Waste Water Management Solutions** for industrial clients.\n* **Key Projects**: Developed solutions based on **data analysis** to reduce water consumption and manage effluent treatment for large-scale industries.\n\n#### **Education**\n\n* **M.Tech in AI & Machine Learning**, **University of Hyderabad** (2020 - 2022)\n* **Masters in Data Science & Business Analytics**, **Symbiosis International University** (2018 - 2020)\n* **B.Tech in Electronics and Communication**, **JSS Academy of Technical Education** (2013 - 2017)\n\n#### **Certifications**\n\n* **Machine Learning with TensorFlow on Google Cloud**\n* **Google Cloud Platform Fundamentals: Core Infrastructure**\n* **Computer Vision - Object Detection with OpenCV and Python**\n\n---\n\n### **Skills Summary**\n\n* **Advanced Analytics**: You possess strong expertise in **predictive modeling**, **statistical analysis**, and **data-driven decision-making** to solve business challenges.\n* **Machine Learning Expertise**: Deep understanding of core **ML algorithms** and frameworks like **TensorFlow**, **PyTorch**, and **Scikit-learn**.\n* **Cloud & Data Engineering**: Proficient in scaling machine learning models in **cloud environments** and handling large datasets.\n* **Business Focus**: You have a proven ability to align data science projects with business goals, improving customer experience, driving sales, and influencing product development.\n* **Technical Communication**: Skilled at presenting complex data and insights to non-technical stakeholders, ensuring clarity and actionable outcomes.\n\n---\n\n### **Conclusion**\n\nYour profile highlights a well-rounded blend of technical expertise and business 
acumen. You have proven experience in solving large-scale business problems with **data science** and **machine learning** techniques, making you a strong candidate for roles involving advanced analytics, product decision-making, and AI-powered product development.\n\n\"\"\"\n" - } - ] - }, - "jsonOutput": true, - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.openAi", - "typeVersion": 1.8, - "position": [ - 704, - 80 - ], - "id": "62d239bc-61e8-4d26-b9b5-9f6b233dcb6d", - "name": "Message a model", - "credentials": { - "openAiApi": { - "id": "c0jeJgIzLB3A6YaN", - "name": "OpenAi account" - } - } - }, - { - "parameters": { - "modelId": { - "__rl": true, - "value": "gpt-4o-mini", - "mode": "list", - "cachedResultName": "GPT-4O-MINI" - }, - "messages": { - "values": [ - { - "content": "You're an intelligent bot perfect at creating cover letters for a job. Please take the candidates resume and create a customized cover letter to the job.", - "role": "system" - }, - { - "content": "=Here's the job listing details:\nTitle: {{ $('RSS Read').item.json.title }}\nDescription: {{ $('OpenAI').item.json.message.content.job_description }}\nName: {{ $('OpenAI').item.json.message.content.company_name }}\nLocation: {{ $('OpenAI').item.json.message.content.location }}\n\nHere is the candidates skill set:\n\nHere's a detailed summary of your CV and the skills you've highlighted throughout your experience:\n\n---\n\n### **Profile Overview**\n\nDivij Bajaj is a **Data Scientist** with over **5 years** of hands-on experience in building, scaling, and deploying machine learning models that solve real-world business problems. Your expertise spans various domains, with a focus on **Generative AI**, **cloud technologies**, and **business analytics**. 
You have a strong ability to bridge the gap between data science and business, using data to drive impactful decisions and improvements in products and processes.\n\n### **Core Competencies**\n\n* **Generative AI**: Experience with cutting-edge generative tools and prompt engineering.\n* **Machine Learning & AI**: Expertise in building and deploying machine learning models using frameworks like **TensorFlow** and **PyTorch**.\n* **Deep Learning**: Strong knowledge in deep learning techniques like **Neural Networks**, **Convolutional Neural Networks (CNN)**, and **Recurrent Neural Networks (RNN)**.\n* **Recommendation Systems**: Experienced in **Collaborative Filtering**, **Matrix Factorization**, and **Dimensionality Reduction** for building scalable recommendation engines.\n* **Cloud Platforms**: Well-versed in **Google Cloud Platform** (GCP), **Azure**, and cloud-based solutions to scale data processing and machine learning models.\n* **Programming**: Proficient in **Python**, **SQL**, and **R**, with a deep understanding of libraries like **Scikit-learn**, **Pandas**, and **NumPy**.\n* **Data Engineering & ETL**: Comfortable with creating and managing data pipelines, ETL processes, and working with large datasets.\n* **Statistical Analysis & A/B Testing**: Strong foundation in performing statistical analysis, hypothesis testing, and A/B testing to optimize models and business decisions.\n* **Product Analytics**: Expertise in analyzing user behavior, building systems to improve user experiences, and generating recommendations.\n* **Collaboration**: Able to work effectively with cross-functional teams including engineering, product management, and marketing to derive actionable insights from data.\n\n### **Key Achievements**\n\n* **Awards**: Best Outgoing Student and Batch Topper in **MBA-Data Science & Analytics** (210 students) from **Symbiosis International University**.\n* **Research Excellence**: Awarded **First Prize** for **Best Research Paper** and 
Presenter at **MIT University**.\n* **Competitions**: Winner of the **First-ever Machine Learning Competition** at **VMware** and **Shark-Tank Finalist**.\n* **Publications**: Contributed notable papers like \"Conversational System, Intelligent Virtual Assistant Named DIVA Using Raspberry Pi\".\n* **Industry Impact**: Built systems that increased **Azure Market Share** and significantly improved **customer targeting** and **marketing campaigns**.\n\n### **Professional Experience**\n\n#### **Microsoft (May 2024 - Present)**\n\n* **Role**: Data & Applied Scientist II, working on **AI-powered solutions** for **EdTech** and **Cloud Operations**.\n* **Key Projects**:\n\n * Anomaly detection and **Root Cause Mitigation Assistant**.\n * Developed a recommendation system for **Azure Subscription Services** to target high-propensity customers and increase market share.\n * **AI/ML techniques** used: **Deep Learning**, **Collaborative Filtering**, **Matrix Factorization**, **A/B Testing**.\n\n#### **VMware (June 2021 - Dec 2021)**\n\n* **Role**: Data Scientist II, focusing on **AI/ML** for marketing and product analytics.\n* **Key Contributions**:\n\n * Developed recommendation systems to improve **customer engagement** and optimize marketing campaigns.\n * Used **Markov Chains** and **Apriori Algorithm** for product recommendations and **NLG** for insights generation.\n * Impacted business metrics by optimizing **campaign targeting** and **ROI**.\n\n#### **Hyper Filteration Pvt. Ltd. 
(June 2017 - June 2018)**\n\n* **Role**: Embedded Software Engineer, working on **Waste Water Management Solutions** for industrial clients.\n* **Key Projects**: Developed solutions based on **data analysis** to reduce water consumption and manage effluent treatment for large-scale industries.\n\n#### **Education**\n\n* **M.Tech in AI & Machine Learning**, **University of Hyderabad** (2020 - 2022)\n* **Masters in Data Science & Business Analytics**, **Symbiosis International University** (2018 - 2020)\n* **B.Tech in Electronics and Communication**, **JSS Academy of Technical Education** (2013 - 2017)\n\n#### **Certifications**\n\n* **Machine Learning with TensorFlow on Google Cloud**\n* **Google Cloud Platform Fundamentals: Core Infrastructure**\n* **Computer Vision - Object Detection with OpenCV and Python**\n\n---\n\n### **Skills Summary**\n\n* **Advanced Analytics**: You possess strong expertise in **predictive modeling**, **statistical analysis**, and **data-driven decision-making** to solve business challenges.\n* **Machine Learning Expertise**: Deep understanding of core **ML algorithms** and frameworks like **TensorFlow**, **PyTorch**, and **Scikit-learn**.\n* **Cloud & Data Engineering**: Proficient in scaling machine learning models in **cloud environments** and handling large datasets.\n* **Business Focus**: You have a proven ability to align data science projects with business goals, improving customer experience, driving sales, and influencing product development.\n* **Technical Communication**: Skilled at presenting complex data and insights to non-technical stakeholders, ensuring clarity and actionable outcomes.\n\n---\n\n### **Conclusion**\n\nYour profile highlights a well-rounded blend of technical expertise and business acumen. 
You have proven experience in solving large-scale business problems with **data science** and **machine learning** techniques, making you a strong candidate for roles involving advanced analytics, product decision-making, and AI-powered product development.\n" - }, - { - "content": "Please return JSON data structured the following way:\n{\n\"cover_letter\": \"\"\n}", - "role": "assistant" - } - ] - }, - "simplify": false, - "jsonOutput": true, - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.openAi", - "typeVersion": 1.8, - "position": [ - 1040, - 80 - ], - "id": "ce758f16-6c6c-43bf-b523-edce222d2f87", - "name": "Message a model1", - "credentials": { - "openAiApi": { - "id": "c0jeJgIzLB3A6YaN", - "name": "OpenAi account" - } - } - }, - { - "parameters": { - "assignments": { - "assignments": [ - { - "id": "993ff775-0e35-4844-8b30-6d19f605cbfa", - "name": "RSS feeds", - "value": "[\"https://rss.app/feeds/eLi2CzXTaHeeHkNE.xml\", \"https://rss.app/feeds/eLi2CzXTaHeeHkNE.xml\"]", - "type": "array" - } - ] - }, - "options": {} - }, - "type": "n8n-nodes-base.set", - "typeVersion": 3.4, - "position": [ - 944, - -528 - ], - "id": "a5242c2c-6e55-470e-afae-80d855644cce", - "name": "Edit Fields1" - } - ], - "pinData": {}, - "connections": { - "When clicking โ€˜Execute workflowโ€™": { - "main": [ - [ - { - "node": "RSS Read", - "type": "main", - "index": 0 - } - ] - ] - }, - "RSS Read": { - "main": [ - [ - { - "node": "Limit", - "type": "main", - "index": 0 - } - ] - ] - }, - "HTTP Request": { - "main": [ - [ - { - "node": "OpenAI", - "type": "main", - "index": 0 - } - ] - ] - }, - "OpenAI": { - "main": [ - [ - { - "node": "Message a model", - "type": "main", - "index": 0 - } - ] - ] - }, - "Limit": { - "main": [ - [ - { - "node": "HTTP Request", - "type": "main", - "index": 0 - } - ] - ] - }, - "Schedule Trigger1": { - "main": [ - [ - { - "node": "Edit Fields1", - "type": "main", - "index": 0 - } - ] - ] - }, - "Split Out": { - "main": [ - [ - { - "node": "RSS Read1", 
- "type": "main", - "index": 0 - } - ] - ] - }, - "Message a model": { - "main": [ - [ - { - "node": "Message a model1", - "type": "main", - "index": 0 - } - ] - ] - }, - "Message a model1": { - "main": [ - [ - { - "node": "Append or update row in sheet", - "type": "main", - "index": 0 - } - ] - ] - }, - "Edit Fields1": { - "main": [ - [ - { - "node": "Split Out", - "type": "main", - "index": 0 - } - ] - ] - } - }, - "active": false, - "settings": { - "executionOrder": "v1" - }, - "versionId": "51197cc2-6246-4afb-a0b9-c249b5ffac8c", - "meta": { - "templateCredsSetupCompleted": true, - "instanceId": "a82628698b927683159b5a7f0404d78adbdba03d82e0812e3ec073ae963af706" - }, - "id": "PNSnKTDdb95LepyW", - "tags": [] -} \ No newline at end of file diff --git a/Nandini_Reddy/Day4/Personalised_Newsletter_Tavily API Web search (1).json b/Nandini_Reddy/Day4/Personalised_Newsletter_Tavily API Web search (1).json deleted file mode 100644 index 1800977..0000000 --- a/Nandini_Reddy/Day4/Personalised_Newsletter_Tavily API Web search (1).json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "name": "Personalised_Newsletter_Tavily API Web search", - "nodes": [ - { - "parameters": { - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.chatTrigger", - "typeVersion": 1.3, - "position": [ - -176, - 32 - ], - "id": "364bd6c6-4afd-46e8-986a-8fce4ebe3c0f", - "name": "When chat message received", - "webhookId": "ca2786e2-7bdd-438c-b0e4-bf77da223786" - }, - { - "parameters": { - "options": { - "systemMessage": "Structure the response in bullet points once you fetch from web before ending it on gmail\n\n\n\n\n\n\n" - } - }, - "type": "@n8n/n8n-nodes-langchain.agent", - "typeVersion": 2.2, - "position": [ - 32, - 32 - ], - "id": "52976c7c-ecd1-4810-83e3-ffabe7120794", - "name": "AI Agent" - }, - { - "parameters": { - "contextWindowLength": 50 - }, - "type": "@n8n/n8n-nodes-langchain.memoryBufferWindow", - "typeVersion": 1.3, - "position": [ - 48, - 240 - ], - "id": 
"987f1a96-a5b5-4b4f-92d6-70dbd44b542a", - "name": "Simple Memory" - }, - { - "parameters": { - "sendTo": "={{ /*n8n-auto-generated-fromAI-override*/ $fromAI('To', ``, 'string') }}", - "subject": "={{ /*n8n-auto-generated-fromAI-override*/ $fromAI('Subject', ``, 'string') }}", - "message": "={{ /*n8n-auto-generated-fromAI-override*/ $fromAI('Message', ``, 'string') }}", - "options": {} - }, - "type": "n8n-nodes-base.gmailTool", - "typeVersion": 2.1, - "position": [ - 464, - 192 - ], - "id": "83961125-a0ef-4181-aab7-4ebffdd39f31", - "name": "Send a message in Gmail", - "webhookId": "34dd4346-1ab3-4139-8866-4ca14e21522d", - "credentials": { - "gmailOAuth2": { - "id": "X1BczLEC7z1DZTkf", - "name": "Gmail account" - } - } - }, - { - "parameters": { - "method": "POST", - "url": "https://api.tavily.com/search", - "sendHeaders": true, - "headerParameters": { - "parameters": [ - { - "name": "Authorization", - "value": "Bearer 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" - } - ] - }, - "sendBody": true, - "bodyParameters": { - "parameters": [ - { - "name": "query", - "value": "={{ /*n8n-auto-generated-fromAI-override*/ $fromAI('parameters0_Value', ``, 'string') }}" - } - ] - }, - "options": {} - }, - "type": "n8n-nodes-base.httpRequestTool", - "typeVersion": 4.2, - "position": [ - 288, - 240 - ], - "id": "9bbe9906-a569-4d75-ba1c-37af630dc476", - "name": "HTTP Request" - }, - { - "parameters": { - "model": { - "__rl": true, - "mode": "list", - "value": "gpt-4.1-mini" - }, - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi", - "typeVersion": 1.2, - "position": [ - -96, - 240 - ], - "id": "c7b54c9d-8c05-4b6b-b167-619cea259eaa", - "name": "OpenAI Chat Model", - "credentials": { - "openAiApi": { - "id": "c0jeJgIzLB3A6YaN", - "name": "OpenAi account" - } - } - } - ], - "pinData": {}, - "connections": { - "When chat message received": { - "main": [ - [ - { - "node": "AI Agent", - "type": "main", - "index": 0 - } - ] - ] - }, - "Simple Memory": { - "ai_memory": [ - [ - 
{ - "node": "AI Agent", - "type": "ai_memory", - "index": 0 - } - ] - ] - }, - "Send a message in Gmail": { - "ai_tool": [ - [ - { - "node": "AI Agent", - "type": "ai_tool", - "index": 0 - } - ] - ] - }, - "HTTP Request": { - "ai_tool": [ - [ - { - "node": "AI Agent", - "type": "ai_tool", - "index": 0 - } - ] - ] - }, - "OpenAI Chat Model": { - "ai_languageModel": [ - [ - { - "node": "AI Agent", - "type": "ai_languageModel", - "index": 0 - } - ] - ] - } - }, - "active": false, - "settings": { - "executionOrder": "v1" - }, - "versionId": "fa9bfe5c-722b-4c9f-8ce2-6e3b33662736", - "meta": { - "instanceId": "a82628698b927683159b5a7f0404d78adbdba03d82e0812e3ec073ae963af706" - }, - "id": "KsHC6TbCCgpZOzYE", - "tags": [] -} \ No newline at end of file diff --git a/Nandini_Reddy/Day5/Bolt_n8n_Viral_LinkedinPost_Creator (1).json b/Nandini_Reddy/Day5/Bolt_n8n_Viral_LinkedinPost_Creator (1).json deleted file mode 100644 index d952e7f..0000000 --- a/Nandini_Reddy/Day5/Bolt_n8n_Viral_LinkedinPost_Creator (1).json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "name": "Bolt_n8n_Viral_LinkedinPost_Creator", - "nodes": [ - { - "parameters": { - "httpMethod": "POST", - "path": "ee3e9945-97dc-470e-9ee5-053d528e15eb", - "responseMode": "responseNode", - "options": {} - }, - "type": "n8n-nodes-base.webhook", - "typeVersion": 2.1, - "position": [ - 64, - 48 - ], - "id": "a789733a-a2d9-4986-9665-871ead1101a0", - "name": "Webhook", - "webhookId": "ee3e9945-97dc-470e-9ee5-053d528e15eb" - }, - { - "parameters": { - "promptType": "define", - "text": "=\"Create a LinkedIn post with the following details:\\n\\n- Theme: -{{ $json.body.theme }} Category:{{ $json.body.category }} \\n- Tone: {{ $json.body.tone }}\\n- Length:{{ $json.body.length }} \n\n\\n\\nGuidelines:\\n1. Start with a strong hook to capture attention in the first 2 lines.\\n2. Keep the writing style engaging, professional, and authentic.\\n3. Make it easy to read with short sentences and clear flow.\\n4. 
Ensure the post fits LinkedInโ€™s style (not too casual, not too academic).\\n5. End with either a takeaway, a motivational line, or a subtle call-to-action (without sounding salesy).\\n\\nNow, generate the LinkedIn post.\"\n\nDon't use * and \\n in response. Format the post in paragraphs to look good for Linkedin", - "options": { - "systemMessage": "You are an expert LinkedIn content creator who writes highly engaging, professional, and authentic posts tailored to specific needs." - } - }, - "type": "@n8n/n8n-nodes-langchain.agent", - "typeVersion": 2.2, - "position": [ - 272, - 48 - ], - "id": "b6ca2080-8bf0-4bc7-933f-1818d8dee2fe", - "name": "AI Agent" - }, - { - "parameters": { - "respondWith": "text", - "responseBody": "={{ $json.output }}", - "options": {} - }, - "type": "n8n-nodes-base.respondToWebhook", - "typeVersion": 1.4, - "position": [ - 624, - 48 - ], - "id": "d1eb21c2-5bb0-4c0b-bafd-41239cd64c93", - "name": "Respond to Webhook" - }, - { - "parameters": { - "httpMethod": "POST", - "path": "20c84917-2446-49f4-9270-28efb9d31ed5", - "options": {} - }, - "type": "n8n-nodes-base.webhook", - "typeVersion": 2.1, - "position": [ - 224, - 384 - ], - "id": "748caea0-a79c-4015-9b8f-a815ed8713a9", - "name": "Webhook1", - "webhookId": "20c84917-2446-49f4-9270-28efb9d31ed5" - }, - { - "parameters": { - "person": "xiDBYnQKJo", - "text": "={{ $json.body.post_text }}", - "additionalFields": {} - }, - "type": "n8n-nodes-base.linkedIn", - "typeVersion": 1, - "position": [ - 576, - 384 - ], - "id": "cbce30cc-8efa-4eb9-af14-092980cda138", - "name": "Create a post", - "credentials": { - "linkedInOAuth2Api": { - "id": "gPY8wYS8m6YzZ26H", - "name": "LinkedIn account" - } - } - }, - { - "parameters": { - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.lmChatOpenRouter", - "typeVersion": 1, - "position": [ - 176, - 224 - ], - "id": "c065dec5-18aa-4ffe-968c-522fa410077a", - "name": "OpenRouter Chat Model", - "credentials": { - "openRouterApi": { - "id": 
"AZWMr3srU1lMFZeh", - "name": "OpenRouter account" - } - } - } - ], - "pinData": {}, - "connections": { - "Webhook": { - "main": [ - [ - { - "node": "AI Agent", - "type": "main", - "index": 0 - } - ] - ] - }, - "AI Agent": { - "main": [ - [ - { - "node": "Respond to Webhook", - "type": "main", - "index": 0 - } - ] - ] - }, - "Webhook1": { - "main": [ - [ - { - "node": "Create a post", - "type": "main", - "index": 0 - } - ] - ] - }, - "OpenRouter Chat Model": { - "ai_languageModel": [ - [ - { - "node": "AI Agent", - "type": "ai_languageModel", - "index": 0 - } - ] - ] - } - }, - "active": false, - "settings": { - "executionOrder": "v1" - }, - "versionId": "25bce9b5-2634-47e7-8381-0dd688482ce0", - "meta": { - "templateCredsSetupCompleted": true, - "instanceId": "a82628698b927683159b5a7f0404d78adbdba03d82e0812e3ec073ae963af706" - }, - "id": "kpT5oj9Mr6Z4azaE", - "tags": [] -} \ No newline at end of file diff --git a/Nandini_Reddy/Day5/bolt-config.json b/Nandini_Reddy/Day5/bolt-config.json deleted file mode 100644 index 89c7603..0000000 --- a/Nandini_Reddy/Day5/bolt-config.json +++ /dev/null @@ -1,226 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Viral LinkedIn Post Generator - Bolt Schema", - "description": "A complete configuration for building a LinkedIn post generator with AI-powered content creation and publishing capabilities", - "type": "object", - - "project_meta": { - "name": "viral-linkedin-post-generator", - "description": "Create engaging LinkedIn posts with AI assistance. Customize your content with different themes, tones, and formats.", - "version": "1.0.0", - "framework": "React + TypeScript + Vite", - "styling": "Tailwind CSS", - "icons": "Lucide React" - }, - - "properties": { - "theme": { - "type": "string", - "title": "Theme / Idea", - "description": "Enter the theme or idea for the post (e.g. 'AI in healthcare')", - "placeholder": "Enter the theme or idea for the post (e.g. 
'AI in healthcare')", - "minLength": 1, - "required": true - }, - "post_type": { - "type": "string", - "title": "Post Type / Category", - "enum": [ - "Thought Leadership", - "Industry Insights", - "How-to / Educational", - "Personal Story", - "Opinion", - "Announcement", - "Case Study" - ], - "required": true - }, - "length": { - "type": "string", - "title": "Post Length", - "enum": [ - "Short (1-3 lines)", - "Medium (3-8 lines)", - "Long (8+ lines)" - ], - "required": true - }, - "tone": { - "type": "string", - "title": "Tone", - "enum": [ - "Inspirational", - "Personal Story", - "Professional / Formal", - "GenZ / Casual", - "Humorous", - "Motivational" - ], - "required": true - } - }, - "required": ["theme", "post_type", "length", "tone"], - - "ui": { - "layout": { - "type": "grid", - "columns": 2, - "responsive": true, - "order": ["theme", "post_type", "length", "tone"] - }, - - "design": { - "theme": "professional", - "color_scheme": "blue_gradient", - "background": "gradient-to-br from-blue-50 via-white to-blue-50", - "primary_color": "blue-600", - "accent_color": "green-600", - "card_style": "rounded-2xl shadow-lg", - "spacing": "generous" - }, - - "actions": { - "generate": { - "label": "Generate Post", - "icon": "Sparkles", - "type": "button", - "style": "primary", - "method": "POST", - "url": "https://nandini3.app.n8n.cloud/webhook-test/ee3e9945-97dc-470e-9ee5-053d528e15eb", - "contentType": "application/json", - "body": { - "theme": "{{theme}}", - "post_type": "{{post_type}}", - "length": "{{length}}", - "tone": "{{tone}}" - }, - "response": { - "type": "text", - "map_to": "generated_post" - }, - "loading_text": "Generating...", - "loading_icon": "Loader2" - }, - - "publish": { - "label": "Publish Post", - "icon": "Send", - "type": "button", - "style": "success", - "method": "POST", - "url": "https://nandini3.app.n8n.cloud/webhook-test/20c84917-2446-49f4-9270-28efb9d31ed5", - "contentType": "application/json", - "body": { - "post_text": 
"{{generated_post}}", - "metadata": { - "theme": "{{theme}}", - "post_type": "{{post_type}}", - "length": "{{length}}", - "tone": "{{tone}}" - } - }, - "response": { - "type": "json", - "map_to": "publish_response" - }, - "loading_text": "Publishing...", - "loading_icon": "Loader2", - "success_message": "Post published successfully!" - }, - - "reset": { - "label": "Reset Form", - "type": "button", - "style": "secondary", - "action": "reset_form" - } - }, - - "fields": { - "generated_post": { - "type": "string", - "widget": "textarea", - "title": "Generated LinkedIn Post", - "readOnly": false, - "rows": 12, - "placeholder": "Generated post will appear here. You can edit before publishing.", - "style": "resize-none" - } - }, - - "status_messages": { - "error": { - "style": "bg-red-50 border-red-200 text-red-700", - "icon": "AlertCircle" - }, - "success": { - "style": "bg-green-50 border-green-200 text-green-700", - "icon": "CheckCircle" - }, - "loading": { - "style": "bg-blue-50 border-blue-200 text-blue-700", - "icon": "Loader2" - } - } - }, - - "dependencies": { - "react": "^18.3.1", - "react-dom": "^18.3.1", - "lucide-react": "^0.344.0", - "tailwindcss": "^3.4.1", - "typescript": "^5.5.3", - "vite": "^5.4.2", - "@vitejs/plugin-react": "^4.3.1" - }, - - "bolt_instructions": { - "framework": "Use React with TypeScript and Vite", - "styling": "Use Tailwind CSS for all styling", - "icons": "Use Lucide React icons only", - "design_principles": [ - "Create beautiful, production-worthy designs", - "Use professional LinkedIn-inspired color scheme", - "Implement smooth animations and hover states", - "Ensure mobile responsiveness", - "Focus on user experience and accessibility" - ], - "functionality": [ - "Form validation for all required fields", - "Loading states for all async operations", - "Error handling with user-friendly messages", - "Success feedback for completed actions", - "Editable generated content before publishing", - "Form reset functionality" - ] - }, - - 
"bolt_meta": { - "notes": [ - "This configuration creates a complete LinkedIn post generator", - "The generate action calls the first webhook to create AI content", - "The publish action uses a separate webhook for posting to LinkedIn", - "Generated posts are editable before publishing", - "All form fields are validated before API calls", - "Responsive design works on all device sizes" - ], - - "api_integration": { - "generate_endpoint": "https://nandini3.app.n8n.cloud/webhook-test/ee3e9945-97dc-470e-9ee5-053d528e15eb", - "publish_endpoint": "https://nandini3.app.n8n.cloud/webhook-test/20c84917-2446-49f4-9270-28efb9d31ed5", - "content_type": "application/json", - "expected_responses": { - "generate": "Plain text response containing the generated post", - "publish": "JSON response with success status and optional message/post_id" - } - }, - - "development_notes": { - "cors_handling": "May require CORS configuration or proxy setup for local development", - "error_handling": "Implements comprehensive error handling for network issues", - "state_management": "Uses React hooks for state management", - "performance": "Optimized for fast loading and smooth interactions" - } - } -} \ No newline at end of file diff --git a/Nandini_Reddy/Day6/NRLlmaIndex_RAG_with_Ollama_running_in_Colab_environment.ipynb b/Nandini_Reddy/Day6/NRLlmaIndex_RAG_with_Ollama_running_in_Colab_environment.ipynb deleted file mode 100644 index 917fb70..0000000 --- a/Nandini_Reddy/Day6/NRLlmaIndex_RAG_with_Ollama_running_in_Colab_environment.ipynb +++ /dev/null @@ -1,1021 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [], - "gpuType": "T4" - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU" - }, - "cells": [ - { - "cell_type": "code", - "execution_count": 16, - "metadata": { - "id": "LaxIKexiPhi7" - }, - "outputs": [], - "source": [ - "# # Install all required 
packages\n", - "!pip install jedi llama-index llama-index-vector-stores-lancedb llama-index-embeddings-huggingface llama-index-llms-huggingface-api lancedb datasets -q\n", - "\n", - "# # Additional packages for local LLM and utilities\n", - "!pip install llama-index-llms-ollama requests -q" - ] - }, - { - "cell_type": "code", - "source": [ - "import os\n", - "import lancedb\n", - "import subprocess\n", - "import requests\n", - "import time\n", - "from pathlib import Path\n", - "from datasets import load_dataset\n", - "\n", - "# LlamaIndex core components\n", - "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Document\n", - "from llama_index.core.node_parser import SentenceSplitter\n", - "from llama_index.core.ingestion import IngestionPipeline\n", - "\n", - "# Embedding and vector store\n", - "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", - "from llama_index.vector_stores.lancedb import LanceDBVectorStore\n", - "\n", - "# LLM integrations\n", - "from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI\n", - "from llama_index.llms.ollama import Ollama\n", - "\n", - "# Async support for notebooks\n", - "import nest_asyncio\n", - "nest_asyncio.apply()\n", - "\n", - "print(\"All libraries imported successfully\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "qLGlWmPQQGm-", - "outputId": "532b81ba-a51b-499e-8f8c-379434c832fb" - }, - "execution_count": 17, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "All libraries imported successfully\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "def prepare_data(num_samples=100):\n", - " \"\"\"\n", - " Load dataset and create document files\n", - " \"\"\"\n", - " print(f\"Loading {num_samples} personas from dataset...\")\n", - "\n", - " # Load the personas dataset\n", - " dataset = load_dataset(\"dvilasuero/finepersonas-v0.1-tiny\", split=\"train\")\n", - "\n", - " # Create data 
directory\n", - " Path(\"data\").mkdir(parents=True, exist_ok=True)\n", - "\n", - " # Save personas as text files and create Document objects\n", - " documents = []\n", - " for i, persona in enumerate(dataset.select(range(min(num_samples, len(dataset))))):\n", - " # Create Document objects for LlamaIndex\n", - " doc = Document(\n", - " text=persona[\"persona\"],\n", - " metadata={\n", - " \"persona_id\": i,\n", - " \"source\": \"finepersonas-dataset\"\n", - " }\n", - " )\n", - " documents.append(doc)\n", - "\n", - " # Optionally save to files as well\n", - " with open(Path(\"data\") / f\"persona_{i}.txt\", \"w\", encoding=\"utf-8\") as f:\n", - " f.write(persona[\"persona\"])\n", - "\n", - " print(f\"Prepared {len(documents)} documents\")\n", - " return documents\n", - "\n", - "# Load the data\n", - "documents = prepare_data(num_samples=100)" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "HAc6RLYaQRIn", - "outputId": "e337ca3e-a29a-4430-eebe-117d8292c3cb" - }, - "execution_count": 18, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Loading 100 personas from dataset...\n", - "Prepared 100 documents\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "def setup_lancedb_store(table_name=\"personas_rag\"):\n", - " \"\"\"\n", - " Initialize LanceDB and create/connect to a table\n", - " \"\"\"\n", - " print(\"Setting up LanceDB connection...\")\n", - "\n", - " # Create or connect to LanceDB\n", - " db = lancedb.connect(\"./lancedb_data\")\n", - "\n", - " # LlamaIndex will handle table creation with proper schema\n", - " print(f\"Connected to LanceDB {db}, table: {table_name}\")\n", - "\n", - " return db, table_name\n", - "\n", - "# Setup database connection\n", - "db, table_name = setup_lancedb_store()" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "pH-vpYC5Qd3g", - "outputId": "2945ae17-6b83-4f1b-a888-40e0799eb322" - }, - "execution_count": 19, - 
"outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Setting up LanceDB connection...\n", - "Connected to LanceDB LanceDBConnection(uri='/content/lancedb_data'), table: personas_rag\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "async def create_and_populate_index(documents, db, table_name):\n", - " \"\"\"\n", - " Create ingestion pipeline and populate LanceDB with embeddings\n", - " \"\"\"\n", - " print(\"Creating embedding model and ingestion pipeline...\")\n", - "\n", - " # Initialize embedding model\n", - " embed_model = HuggingFaceEmbedding(\n", - " model_name=\"BAAI/bge-small-en-v1.5\"\n", - " )\n", - "\n", - " # Create LanceDB vector store\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./lancedb_data\",\n", - " table_name=table_name,\n", - " mode=\"overwrite\" # overwrite existing table\n", - " )\n", - "\n", - " # Create ingestion pipeline\n", - " pipeline = IngestionPipeline(\n", - " transformations=[\n", - " SentenceSplitter(chunk_size=512, chunk_overlap=20),\n", - " embed_model,\n", - " ],\n", - " vector_store=vector_store,\n", - " )\n", - "\n", - " print(\"Processing documents and creating embeddings...\")\n", - " # Run the pipeline to process documents and store in LanceDB\n", - " nodes = await pipeline.arun(documents=documents)\n", - " print(f\"Successfully processed {len(nodes)} text chunks\")\n", - "\n", - " return vector_store, embed_model\n", - "\n", - "# Create embeddings and populate vector store\n", - "vector_store, embed_model = await create_and_populate_index(documents, db, table_name)" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "y243xaIsQqE8", - "outputId": "5eaa0345-2367-493a-e767-16f557151f83" - }, - "execution_count": 20, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Creating embedding model and ingestion pipeline...\n", - "Processing documents and creating embeddings...\n", - "Successfully processed 100 text 
chunks\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "def perform_vector_search(db, table_name, query_text, embed_model, top_k=5):\n", - " \"\"\"\n", - " Perform direct vector search on LanceDB\n", - " \"\"\"\n", - " # Get query embedding\n", - " query_embedding = embed_model.get_text_embedding(query_text)\n", - "\n", - " # Open table and perform search\n", - " table = db.open_table(table_name)\n", - " results = table.search(query_embedding).limit(top_k).to_pandas()\n", - "\n", - " return results\n", - "\n", - "def test_vector_search():\n", - " \"\"\"\n", - " Test vector search functionality with sample queries\n", - " \"\"\"\n", - " print(\"Testing Vector Search (No LLM needed)\")\n", - " print(\"=\" * 50)\n", - "\n", - " # Test queries\n", - " queries = [\n", - " \"technology and artificial intelligence expert\",\n", - " \"teacher educator professor\",\n", - " \"environment climate sustainability\",\n", - " \"art culture heritage creative\"\n", - " ]\n", - "\n", - " for query in queries:\n", - " print(f\"\\nQuery: {query}\")\n", - " print(\"-\" * 30)\n", - "\n", - " # Perform search\n", - " results = perform_vector_search(db, table_name, query, embed_model, top_k=3)\n", - "\n", - " for idx, row in results.iterrows():\n", - " score = row.get('_distance', 'N/A')\n", - " text = row.get('text', 'N/A')\n", - "\n", - " # Format score\n", - " if isinstance(score, (int, float)):\n", - " score_str = f\"{score:.3f}\"\n", - " else:\n", - " score_str = str(score)\n", - "\n", - " print(f\"\\nResult {idx + 1} (Score: {score_str}):\")\n", - " print(f\"{text[:200]}...\")\n", - "\n", - "# Run vector search test\n", - "test_vector_search()" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "V3SsP2SDQ09d", - "outputId": "f5c3f284-19ae-430b-a4f3-67c4ac10d117" - }, - "execution_count": 21, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Testing Vector Search (No LLM needed)\n", - 
"==================================================\n", - "\n", - "Query: technology and artificial intelligence expert\n", - "------------------------------\n", - "\n", - "Result 1 (Score: 0.589):\n", - "A computer scientist or electronics engineer researching alternative sustainable materials for neuromorphic computing and memristor development or A science journalist covering emerging technologies a...\n", - "\n", - "Result 2 (Score: 0.589):\n", - "A computer scientist or electronics engineer researching alternative sustainable materials for neuromorphic computing and memristor development or A science journalist covering emerging technologies a...\n", - "\n", - "Result 3 (Score: 0.626):\n", - "An aerospace engineer or astrobiologist interested in innovative technologies for space exploration....\n", - "\n", - "Query: teacher educator professor\n", - "------------------------------\n", - "\n", - "Result 1 (Score: 0.577):\n", - "An English language arts teacher with a focus on upper elementary education....\n", - "\n", - "Result 2 (Score: 0.577):\n", - "An English language arts teacher with a focus on upper elementary education....\n", - "\n", - "Result 3 (Score: 0.584):\n", - "An educator, possibly a middle school geography teacher or an NCERT textbook author, focused on creating educational content for Class 7 students studying geography, with an emphasis on clear explanat...\n", - "\n", - "Query: environment climate sustainability\n", - "------------------------------\n", - "\n", - "Result 1 (Score: 0.683):\n", - "An environmental scientist focused on climate change and pollution issues, or a sustainability advocate pushing for global action on reducing greenhouse gas emissions....\n", - "\n", - "Result 2 (Score: 0.683):\n", - "An environmental scientist focused on climate change and pollution issues, or a sustainability advocate pushing for global action on reducing greenhouse gas emissions....\n", - "\n", - "Result 3 (Score: 0.723):\n", - "An environmental 
policy analyst focused on sustainable transportation alternatives, or an automotive engineer with a specialization in electric vehicles, likely wrote this text, aiming to inform the g...\n", - "\n", - "Query: art culture heritage creative\n", - "------------------------------\n", - "\n", - "Result 1 (Score: 0.625):\n", - "A local art historian and museum professional interested in 19th-century American art and the local cultural heritage of Cincinnati....\n", - "\n", - "Result 2 (Score: 0.625):\n", - "A local art historian and museum professional interested in 19th-century American art and the local cultural heritage of Cincinnati....\n", - "\n", - "Result 3 (Score: 0.686):\n", - "An art historian or scholar of classical Chinese culture, particularly one fascinated by the intersection of nature, spirituality, and art in ancient Chinese civilization....\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "RAG with HF API" - ], - "metadata": { - "id": "b4aLEO8WRtBZ" - } - }, - { - "cell_type": "code", - "source": [ - "# Set your HuggingFace API token here\n", - "# Get your free token from: https://huggingface.co/settings/tokens\n", - "os.environ[\"HUGGINGFACE_API_KEY\"] = \"\" # Replace with your actual token\n", - "\n", - "def create_query_engine(vector_store, embed_model, llm=None):\n", - " \"\"\"\n", - " Create a query engine from the vector store\n", - " \"\"\"\n", - " # Create index from vector store\n", - " index = VectorStoreIndex.from_vector_store(\n", - " vector_store=vector_store,\n", - " embed_model=embed_model\n", - " )\n", - "\n", - " # Setup LLM if provided\n", - " query_engine_kwargs = {}\n", - " if llm:\n", - " query_engine_kwargs['llm'] = llm\n", - "\n", - " # Create query engine\n", - " query_engine = index.as_query_engine(\n", - " response_mode=\"tree_summarize\",\n", - " **query_engine_kwargs\n", - " )\n", - "\n", - " return query_engine\n", - "\n", - "def query_rag(query_engine, question):\n", - " \"\"\"\n", - " Query the RAG 
system and return response\n", - " \"\"\"\n", - " response = query_engine.query(question)\n", - " return response\n", - "\n", - "async def test_huggingface_rag():\n", - " \"\"\"\n", - " Test RAG with HuggingFace API\n", - " \"\"\"\n", - " print(\"Testing RAG with HuggingFace API\")\n", - " print(\"=\" * 40)\n", - "\n", - " try:\n", - " # Initialize HuggingFace LLM with authentication\n", - " llm = HuggingFaceInferenceAPI(\n", - " model_name=\"HuggingFaceH4/zephyr-7b-beta\",\n", - " token=os.environ.get(\"HUGGINGFACE_API_KEY\")\n", - " )\n", - "\n", - " # Create query engine\n", - " query_engine = create_query_engine(vector_store, embed_model, llm)\n", - "\n", - " # Test queries\n", - " queries = [\n", - " \"Find personas interested in technology and AI\",\n", - " \"Who are the educators or teachers in the dataset?\",\n", - " \"Describe personas working with environmental topics\"\n", - " ]\n", - "\n", - " for query in queries:\n", - " print(f\"\\nQuery: {query}\")\n", - " print(\"-\" * 30)\n", - "\n", - " try:\n", - " response = query_rag(query_engine, query)\n", - " print(f\"Response: {response}\")\n", - " except Exception as e:\n", - " print(f\"Error: {e}\")\n", - "\n", - " except Exception as e:\n", - " print(f\"Setup error: {e}\")\n", - " print(\"Make sure to set your HuggingFace API token above\")\n", - "\n", - "# Uncomment the line below after setting your API token\n", - "await test_huggingface_rag()" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "YY0lDmMDQ_7G", - "outputId": "094eeda8-fe46-4576-fa24-ad9fb3f19cc1" - }, - "execution_count": 22, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Testing RAG with HuggingFace API\n", - "========================================\n", - "\n", - "Query: Find personas interested in technology and AI\n", - "------------------------------\n", - "Error: 404 Client Error: Not Found for url: 
https://router.huggingface.co/hf-inference/models/HuggingFaceH4/zephyr-7b-beta/v1/chat/completions (Request ID: Root=1-6905ca61-36198b2b2300fa526a768d4b;1262e825-d8b2-42ce-a0ef-7322953984e6)\n", - "\n", - "Query: Who are the educators or teachers in the dataset?\n", - "------------------------------\n", - "Error: 404 Client Error: Not Found for url: https://router.huggingface.co/hf-inference/models/HuggingFaceH4/zephyr-7b-beta/v1/chat/completions (Request ID: Root=1-6905ca61-7c14ea813d99976210ebfb7b;ec2e6ba6-c75a-4179-af01-48c89c8ceb3f)\n", - "\n", - "Query: Describe personas working with environmental topics\n", - "------------------------------\n", - "Error: 404 Client Error: Not Found for url: https://router.huggingface.co/hf-inference/models/HuggingFaceH4/zephyr-7b-beta/v1/chat/completions (Request ID: Root=1-6905ca61-1eea41722a9835f82828e875;87d7ac41-38f7-4ea5-bf73-05bfdaf8adcd)\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "Install Ollama in Colab" - ], - "metadata": { - "id": "s6vpb15oXoil" - } - }, - { - "cell_type": "code", - "source": [ - "# !sudo apt update\n", - "# !sudo apt install -y pciutils\n", - "# !curl -fsSL https://ollama.com/install.sh | sh" - ], - "metadata": { - "id": "8Cgm3O7xSS_z" - }, - "execution_count": 23, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# !ollama show llama3.2:1b\n", - "!ollama pull gemma3:1b" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "1DQH17HmdanY", - "outputId": "14bc0c01-7185-4637-c439-c0788a0a99dd" - }, - "execution_count": 24, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "/bin/bash: line 1: ollama: command not found\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "import threading\n", - "import subprocess\n", - "import time\n", - "\n", - "def run_ollama_serve():\n", - " subprocess.Popen([\"ollama\", \"serve\"])\n", - "\n", - "thread = threading.Thread(target=run_ollama_serve)\n", - 
"thread.start()\n", - "time.sleep(5)" - ], - "metadata": { - "id": "AB9DyGrLYFX7", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "a9a3a0d7-e9df-496e-d057-daca662a15d4" - }, - "execution_count": 25, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "Exception in thread Thread-7 (run_ollama_serve):\n", - "Traceback (most recent call last):\n", - " File \"/usr/lib/python3.12/threading.py\", line 1075, in _bootstrap_inner\n", - " self.run()\n", - " File \"/usr/lib/python3.12/threading.py\", line 1012, in run\n", - " self._target(*self._args, **self._kwargs)\n", - " File \"/tmp/ipython-input-2856519456.py\", line 6, in run_ollama_serve\n", - " File \"/usr/lib/python3.12/subprocess.py\", line 1026, in __init__\n", - " self._execute_child(args, executable, preexec_fn, close_fds,\n", - " File \"/usr/lib/python3.12/subprocess.py\", line 1955, in _execute_child\n", - " raise child_exception_type(errno_num, err_msg, err_filename)\n", - "FileNotFoundError: [Errno 2] No such file or directory: 'ollama'\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "!ollama list" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "H6cGff8Eild0", - "outputId": "f18b57fc-f0d7-4270-8569-fe2243c895ce" - }, - "execution_count": 26, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "/bin/bash: line 1: ollama: command not found\n" - ] - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "03d1fe35", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "b6a539e5-6348-4988-b01c-16e81548ae24" - }, - "source": [ - "import subprocess\n", - "import signal\n", - "\n", - "def stop_ollama_serve():\n", - " \"\"\"\n", - " Find and stop the ollama serve process.\n", - " \"\"\"\n", - " try:\n", - " # Find the process ID of the ollama serve process\n", - " # This command works on Linux-like systems (Colab environment)\n", - " pid_command = \"pgrep -f 
'ollama serve'\"\n", - " pid_result = subprocess.run(pid_command, capture_output=True, text=True, shell=True, check=True)\n", - " pids = pid_result.stdout.strip().splitlines()\n", - "\n", - " if not pids:\n", - " print(\"Ollama serve process not found.\")\n", - " return\n", - "\n", - " print(f\"Found Ollama serve process(es) with PID(s): {', '.join(pids)}\")\n", - "\n", - " # Kill the process(es)\n", - " for pid in pids:\n", - " try:\n", - " os.kill(int(pid), signal.SIGTERM) # Use SIGTERM for graceful shutdown\n", - " print(f\"Sent SIGTERM to PID {pid}\")\n", - " except ProcessLookupError:\n", - " print(f\"PID {pid} not found, it might have already stopped.\")\n", - " except Exception as e:\n", - " print(f\"Error stopping PID {pid}: {e}\")\n", - "\n", - " except subprocess.CalledProcessError as e:\n", - " print(f\"Could not find ollama serve process using pgrep: {e.stderr}\")\n", - " except Exception as e:\n", - " print(f\"An error occurred while trying to stop ollama: {e}\")\n", - "\n", - "# Stop the Ollama server\n", - "stop_ollama_serve()" - ], - "execution_count": 27, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Found Ollama serve process(es) with PID(s): 4819\n", - "PID 4819 not found, it might have already stopped.\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "RAG with Ollama" - ], - "metadata": { - "id": "98y8rOzwR4m_" - } - }, - { - "cell_type": "code", - "source": [ - "def check_ollama_installed():\n", - " \"\"\"Check if Ollama is installed\"\"\"\n", - " try:\n", - " result = subprocess.run([\"ollama\", \"--version\"],\n", - " capture_output=True, text=True, shell=True)\n", - " if result.returncode == 0:\n", - " print(f\"Ollama is installed: {result.stdout.strip()}\")\n", - " return True\n", - " except FileNotFoundError:\n", - " pass\n", - "\n", - " print(\"Ollama is not installed or not in PATH\")\n", - " return False\n", - "\n", - "def download_ollama():\n", - " \"\"\"Download Ollama installer for 
Windows\"\"\"\n", - " print(\"Downloading Ollama for Windows...\")\n", - "\n", - " url = \"https://ollama.com/download/OllamaSetup.exe\"\n", - " response = requests.get(url)\n", - "\n", - " installer_path = Path(\"OllamaSetup.exe\")\n", - " with open(installer_path, \"wb\") as f:\n", - " f.write(response.content)\n", - "\n", - " print(\"Ollama downloaded successfully!\")\n", - " print(\"Please run the installer manually and then continue.\")\n", - " print(f\"Installer location: {installer_path.absolute()}\")\n", - "\n", - " return installer_path\n", - "\n", - "def start_ollama_service():\n", - " \"\"\"Start Ollama service\"\"\"\n", - " try:\n", - " print(\"Starting Ollama service...\")\n", - " subprocess.Popen([\"ollama\", \"serve\"], shell=True)\n", - " time.sleep(3)\n", - " print(\"Ollama service started!\")\n", - " return True\n", - " except Exception as e:\n", - " print(f\"Failed to start Ollama: {e}\")\n", - " return False\n", - "\n", - "def pull_ollama_model(model_name=\"llama3.2:1b\"):\n", - " \"\"\"Pull a lightweight model for local inference\"\"\"\n", - " try:\n", - " print(f\"Pulling model: {model_name}\")\n", - " result = subprocess.run([\"ollama\", \"pull\", model_name],\n", - " capture_output=True, text=True, shell=True)\n", - " if result.returncode == 0:\n", - " print(f\"Model {model_name} pulled successfully!\")\n", - " return True\n", - " else:\n", - " print(f\"Failed to pull model: {result.stderr}\")\n", - " return False\n", - " except Exception as e:\n", - " print(f\"Error pulling model: {e}\")\n", - " return False\n", - "\n", - "def setup_ollama():\n", - " \"\"\"Complete Ollama setup\"\"\"\n", - " if not check_ollama_installed():\n", - " print(\"Ollama needs to be installed.\")\n", - " download_ollama()\n", - " return False\n", - "\n", - " if not start_ollama_service():\n", - " return False\n", - "\n", - " if not pull_ollama_model(\"llama3.2:1b\"):\n", - " return False\n", - "\n", - " print(\"Ollama setup complete!\")\n", - " return True\n", - 
"\n", - "# Check Ollama installation\n", - "check_ollama_installed()" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "fXWk7qr-R6Ku", - "outputId": "24826126-a8bb-499f-f235-b4995236e75d" - }, - "execution_count": 28, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Ollama is not installed or not in PATH\n" - ] - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "False" - ] - }, - "metadata": {}, - "execution_count": 28 - } - ] - }, - { - "cell_type": "code", - "source": [ - "async def test_local_llm_rag():\n", - " \"\"\"\n", - " Test RAG with local Ollama LLM\n", - " \"\"\"\n", - " print(\"Testing RAG with Local LLM (Ollama)\")\n", - " print(\"=\" * 40)\n", - "\n", - " try:\n", - " # Initialize local Ollama LLM\n", - " llm = Ollama(\n", - " # model=\"llama3.2:1b\",\n", - " model=\"gemma3:1b\",\n", - " base_url=\"127.0.0.1:11434\",\n", - " request_timeout=60.0\n", - " )\n", - "\n", - " # Create query engine\n", - " query_engine = create_query_engine(vector_store, embed_model, llm)\n", - "\n", - " # Test queries\n", - " queries = [\n", - " \"Find personas interested in technology and AI\",\n", - " \"Who are the educators or teachers in the dataset?\",\n", - " \"Describe personas working with environmental topics\"\n", - " ]\n", - "\n", - " for query in queries:\n", - " print(f\"\\nQuery: {query}\")\n", - " print(\"-\" * 30)\n", - "\n", - " try:\n", - " response = query_rag(query_engine, query)\n", - " print(f\"Response: {response}\")\n", - " except Exception as e:\n", - " print(f\"Error: {e}\")\n", - " print(\"Make sure Ollama is running with: ollama serve\")\n", - "\n", - " except Exception as e:\n", - " print(f\"Setup error: {e}\")\n", - " print(\"Make sure Ollama is installed and running\")\n", - "\n", - "# Uncomment after Ollama setup is complete\n", - "await test_local_llm_rag()" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": 
"41FokCOjSPZg", - "outputId": "788f8683-9307-4a32-e521-da9a066cce61" - }, - "execution_count": 29, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Testing RAG with Local LLM (Ollama)\n", - "========================================\n", - "Setup error: Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download\n", - "Make sure Ollama is installed and running\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "source": [ - "Utility functions and advanced features" - ], - "metadata": { - "id": "da6POnwsZxz8" - } - }, - { - "cell_type": "code", - "source": [ - "def explore_lancedb_table(db, table_name):\n", - " \"\"\"\n", - " Explore the structure and content of the LanceDB table\n", - " \"\"\"\n", - " try:\n", - " table = db.open_table(table_name)\n", - "\n", - " print(\"Table Schema:\")\n", - " print(table.schema)\n", - "\n", - " print(f\"\\nTotal records: {table.count_rows()}\")\n", - "\n", - " print(\"\\nSample records:\")\n", - " df = table.to_pandas().head()\n", - " print(df)\n", - "\n", - " return table\n", - " except Exception as e:\n", - " print(f\"Error exploring table: {e}\")\n", - " return None\n", - "\n", - "def create_filtered_query_engine(db, table_name, embed_model, filter_dict=None):\n", - " \"\"\"\n", - " Create a query engine with metadata filtering capabilities\n", - " \"\"\"\n", - " from llama_index.core.vector_stores import MetadataFilters, MetadataFilter, FilterOperator\n", - "\n", - " # Reconnect to existing table\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./lancedb_data\",\n", - " table_name=table_name,\n", - " mode=\"read\"\n", - " )\n", - "\n", - " # Create index\n", - " index = VectorStoreIndex.from_vector_store(\n", - " vector_store=vector_store,\n", - " embed_model=embed_model\n", - " )\n", - "\n", - " # Create query engine with filters if provided\n", - " if filter_dict:\n", - " filters = MetadataFilters(\n", - " filters=[\n", - " 
MetadataFilter(\n", - " key=key,\n", - " value=value,\n", - " operator=FilterOperator.EQ\n", - " ) for key, value in filter_dict.items()\n", - " ]\n", - " )\n", - " query_engine = index.as_query_engine(\n", - " filters=filters,\n", - " response_mode=\"tree_summarize\"\n", - " )\n", - " else:\n", - " query_engine = index.as_query_engine(\n", - " response_mode=\"tree_summarize\"\n", - " )\n", - "\n", - " return query_engine\n", - "\n", - "async def batch_process_documents(documents, batch_size=50):\n", - " \"\"\"\n", - " Process documents in batches for large datasets\n", - " \"\"\"\n", - " embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n", - "\n", - " for i in range(0, len(documents), batch_size):\n", - " batch = documents[i:i+batch_size]\n", - " table_name = f\"personas_batch_{i//batch_size}\"\n", - "\n", - " vector_store = LanceDBVectorStore(\n", - " uri=\"./lancedb_data\",\n", - " table_name=table_name,\n", - " mode=\"overwrite\"\n", - " )\n", - "\n", - " pipeline = IngestionPipeline(\n", - " transformations=[\n", - " SentenceSplitter(chunk_size=512, chunk_overlap=20),\n", - " embed_model,\n", - " ],\n", - " vector_store=vector_store,\n", - " )\n", - "\n", - " nodes = await pipeline.arun(documents=batch)\n", - " print(f\"Processed batch {i//batch_size + 1}: {len(nodes)} nodes\")\n", - "\n", - "def show_usage_examples():\n", - " \"\"\"\n", - " Display usage examples for different scenarios\n", - " \"\"\"\n", - " print(\"Usage Examples:\")\n", - " print(\"=\" * 30)\n", - "\n", - " print(\"\\n1. Vector Search Only:\")\n", - " print(\" test_vector_search()\")\n", - "\n", - " print(\"\\n2. HuggingFace API RAG:\")\n", - " print(\" # Set API token first\")\n", - " print(\" os.environ['HUGGINGFACE_API_KEY'] = 'your_token'\")\n", - " print(\" await test_huggingface_rag()\")\n", - "\n", - " print(\"\\n3. 
Local LLM RAG:\")\n", - " print(\" # Install and setup Ollama first\")\n", - " print(\" setup_ollama()\")\n", - " print(\" await test_local_llm_rag()\")\n", - "\n", - " print(\"\\n4. Explore Database:\")\n", - " print(\" explore_lancedb_table(db, table_name)\")\n", - "\n", - "# Show usage examples\n", - "show_usage_examples()" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "W0OUHPVHZwUW", - "outputId": "a66ff1b9-1da5-4198-aabb-cd28e1338438" - }, - "execution_count": 30, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Usage Examples:\n", - "==============================\n", - "\n", - "1. Vector Search Only:\n", - " test_vector_search()\n", - "\n", - "2. HuggingFace API RAG:\n", - " # Set API token first\n", - " os.environ['HUGGINGFACE_API_KEY'] = 'your_token'\n", - " await test_huggingface_rag()\n", - "\n", - "3. Local LLM RAG:\n", - " # Install and setup Ollama first\n", - " setup_ollama()\n", - " await test_local_llm_rag()\n", - "\n", - "4. 
Explore Database:\n", - " explore_lancedb_table(db, table_name)\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "explore_lancedb_table('LanceDB', 'personas_rag')" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "cPU36POWgTET", - "outputId": "a962fce8-70c9-4be1-ab21-c45ecbb62639" - }, - "execution_count": 31, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Error exploring table: 'str' object has no attribute 'open_table'\n" - ] - } - ] - } - ] -} \ No newline at end of file diff --git a/Nandini_Reddy/README.md b/Nandini_Reddy/README.md deleted file mode 100644 index 1865a52..0000000 --- a/Nandini_Reddy/README.md +++ /dev/null @@ -1 +0,0 @@ -# Nandini_Reddy diff --git a/Nilesh_Anchan/README.md b/Nilesh_Anchan/README.md deleted file mode 100644 index 986e3f7..0000000 --- a/Nilesh_Anchan/README.md +++ /dev/null @@ -1 +0,0 @@ -# Nilesh_Anchan diff --git a/Nitin_Deshmukh/README.md b/Nitin_Deshmukh/README.md deleted file mode 100644 index 7efc183..0000000 --- a/Nitin_Deshmukh/README.md +++ /dev/null @@ -1 +0,0 @@ -# Nitin_Deshmukh diff --git a/Parag_Doshi/LinkedInPosting_n8n/Bolt_n8n_Viral_LinkedinPost_Creator (1).json b/Parag_Doshi/LinkedInPosting_n8n/Bolt_n8n_Viral_LinkedinPost_Creator (1).json deleted file mode 100644 index c75942a..0000000 --- a/Parag_Doshi/LinkedInPosting_n8n/Bolt_n8n_Viral_LinkedinPost_Creator (1).json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "name": "Bolt_n8n_Viral_LinkedinPost_Creator", - "nodes": [ - { - "parameters": { - "promptType": "define", - "text": "=\"Create a LinkedIn post with the following details:\\n\\n- Theme: -{{ $json.body.theme }} Category:{{ $json.body.category }} \\n- Tone: {{ $json.body.tone }}\\n- Length:{{ $json.body.length }} \n\n\\n\\nGuidelines:\\n1. Start with a strong hook to capture attention in the first 2 lines.\\n2. Keep the writing style engaging, professional, and authentic.\\n3. 
Make it easy to read with short sentences and clear flow.\\n4. Ensure the post fits LinkedInโ€™s style (not too casual, not too academic).\\n5. End with either a takeaway, a motivational line, or a subtle call-to-action (without sounding salesy).\\n\\nNow, generate the LinkedIn post.\"\n\nDon't use * and \\n in response. Format the post in paragraphs to look good for Linkedin", - "options": { - "systemMessage": "You are an expert LinkedIn content creator who writes highly engaging, professional, and authentic posts tailored to specific needs." - } - }, - "type": "@n8n/n8n-nodes-langchain.agent", - "typeVersion": 2.2, - "position": [ - 208, - 0 - ], - "id": "2bb2507d-00cf-4ba2-b590-cd00680c4bdf", - "name": "AI Agent" - }, - { - "parameters": { - "respondWith": "text", - "responseBody": "={{ $json.output }}", - "options": {} - }, - "type": "n8n-nodes-base.respondToWebhook", - "typeVersion": 1.4, - "position": [ - 560, - 0 - ], - "id": "6b050c03-3184-41c8-88a8-afd50e911d3a", - "name": "Respond to Webhook" - }, - { - "parameters": { - "person": "CYzFbeZYNy", - "text": "={{ $json.body.post_text }}", - "additionalFields": {} - }, - "type": "n8n-nodes-base.linkedIn", - "typeVersion": 1, - "position": [ - 512, - 336 - ], - "id": "f37fd42e-e071-4a2c-bb80-deb08579030b", - "name": "Create a post", - "credentials": { - "linkedInOAuth2Api": { - "id": "j0aYkKDOGKL7iJA0", - "name": "LinkedIn account" - } - } - }, - { - "parameters": { - "options": {} - }, - "type": "@n8n/n8n-nodes-langchain.lmChatOpenRouter", - "typeVersion": 1, - "position": [ - 112, - 176 - ], - "id": "c4d12c77-9164-4a5a-8de6-d3bddaf07192", - "name": "OpenRouter Chat Model", - "credentials": { - "openRouterApi": { - "id": "0EJmVHh2tyn4zPfd", - "name": "OpenRouter account" - } - } - }, - { - "parameters": { - "httpMethod": "POST", - "path": "ee3e9945-97dc-470e-9ee5-053d528e15eb", - "responseMode": "responseNode", - "options": {} - }, - "type": "n8n-nodes-base.webhook", - "typeVersion": 2.1, - "position": [ - 0, 
- 0 - ], - "id": "9fb7c662-2f65-48a9-81ba-295be60d9c85", - "name": "Generate Webhook", - "webhookId": "ee3e9945-97dc-470e-9ee5-053d528e15eb" - }, - { - "parameters": { - "httpMethod": "POST", - "path": "20c84917-2446-49f4-9270-28efb9d31ed5", - "options": {} - }, - "type": "n8n-nodes-base.webhook", - "typeVersion": 2.1, - "position": [ - 160, - 336 - ], - "id": "f5492583-14d0-4934-92bc-beae69a9e042", - "name": "Publish Webhook", - "webhookId": "20c84917-2446-49f4-9270-28efb9d31ed5" - } - ], - "pinData": {}, - "connections": { - "AI Agent": { - "main": [ - [ - { - "node": "Respond to Webhook", - "type": "main", - "index": 0 - } - ] - ] - }, - "OpenRouter Chat Model": { - "ai_languageModel": [ - [ - { - "node": "AI Agent", - "type": "ai_languageModel", - "index": 0 - } - ] - ] - }, - "Generate Webhook": { - "main": [ - [ - { - "node": "AI Agent", - "type": "main", - "index": 0 - } - ] - ] - }, - "Publish Webhook": { - "main": [ - [ - { - "node": "Create a post", - "type": "main", - "index": 0 - } - ] - ] - } - }, - "active": false, - "settings": { - "executionOrder": "v1" - }, - "versionId": "2b041383-2469-4bbe-9123-9904c0cee8e4", - "meta": { - "templateCredsSetupCompleted": true, - "instanceId": "159e159bfb718ea41c6c3c037acd86df815c8cd33efdb1ff88ee8c6ee8683504" - }, - "id": "4tTPIsOwDPyWZFr7", - "tags": [] -} \ No newline at end of file diff --git a/Parag_Doshi/LinkedInPosting_n8n/viral_linked_in_post_generator_react_frontend_single_file.jsx b/Parag_Doshi/LinkedInPosting_n8n/viral_linked_in_post_generator_react_frontend_single_file.jsx deleted file mode 100644 index 042b3c1..0000000 --- a/Parag_Doshi/LinkedInPosting_n8n/viral_linked_in_post_generator_react_frontend_single_file.jsx +++ /dev/null @@ -1,290 +0,0 @@ -"use client"; - -import React, { useMemo, useState } from "react"; - -// --- CONFIG --- -// You can point these directly at n8n webhooks OR at your Next.js API routes -// (recommended to avoid CORS). 
If using your server route, set both to -// something like "/api/generate" and "/api/publish" respectively. -const GENERATE_WEBHOOK_URL = - "https://pdoshi.app.n8n.cloud/webhook-test/ee3e9945-97dc-470e-9ee5-053d528e15eb"; -const PUBLISH_WEBHOOK_URL = - "https://pdoshi.app.n8n.cloud/webhook-test/20c84917-2446-49f4-9270-28efb9d31ed5"; - -export default function ViralLinkedInPostGenerator() { - // Form state (matches your Bolt schema) - const [theme, setTheme] = useState(""); - const [postType, setPostType] = useState("Thought Leadership"); - const [length, setLength] = useState("Medium (3-8 lines)"); - const [tone, setTone] = useState("Professional / Formal"); - - // Generated + publish response - const [generatedPost, setGeneratedPost] = useState(""); - const [publishResponse, setPublishResponse] = useState(null); - - // UI state - const [loading, setLoading] = useState<"generate" | "publish" | null>(null); - const [error, setError] = useState(null); - - // Options (enum values from your schema) - const postTypeOptions = useMemo( - () => [ - "Thought Leadership", - "Industry Insights", - "How-to / Educational", - "Personal Story", - "Opinion", - "Announcement", - "Case Study", - ], - [] - ); - - const lengthOptions = useMemo( - () => ["Short (1-3 lines)", "Medium (3-8 lines)", "Long (8+ lines)"], - [] - ); - - const toneOptions = useMemo( - () => [ - "Inspirational", - "Personal Story", - "Professional / Formal", - "GenZ / Casual", - "Humorous", - "Motivational", - ], - [] - ); - - const valid = theme.trim() && postType && length && tone; - - async function handleGenerate() { - if (!valid) { - setError("Please complete all fields before generating."); - return; - } - setError(null); - setLoading("generate"); - setPublishResponse(null); - - const payload = { theme, post_type: postType, length, tone }; - try { - const res = await fetch(GENERATE_WEBHOOK_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(payload), - }); - 
- // Some n8n nodes may return text; others return JSON. Try both. - const contentType = res.headers.get("content-type") || ""; - if (!res.ok) { - const msg = `Generate webhook error ${res.status}`; - throw new Error(msg); - } - - if (contentType.includes("application/json")) { - const data = await res.json(); - // Try a few common shapes - const text = - typeof data === "string" - ? data - : data.post || data.text || JSON.stringify(data, null, 2); - setGeneratedPost(text); - } else { - const text = await res.text(); - setGeneratedPost(text); - } - } catch (err: any) { - setError(err?.message || "Failed to generate post."); - } finally { - setLoading(null); - } - } - - async function handlePublish() { - if (!generatedPost.trim()) { - setError("Nothing to publish yet. Generate or paste a post first."); - return; - } - setError(null); - setLoading("publish"); - - const payload = { - post_text: generatedPost, - metadata: { theme, post_type: postType, length, tone }, - }; - - try { - const res = await fetch(PUBLISH_WEBHOOK_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(payload), - }); - - const contentType = res.headers.get("content-type") || ""; - if (contentType.includes("application/json")) { - const data = await res.json(); - setPublishResponse(data); - } else { - const text = await res.text(); - setPublishResponse({ raw: text, status: res.status }); - } - - if (!res.ok) { - throw new Error( - `Publish webhook error ${res.status}: ${JSON.stringify( - publishResponse, - null, - 2 - )}` - ); - } - } catch (err: any) { - setError(err?.message || "Failed to publish post."); - } finally { - setLoading(null); - } - } - - function FieldLabel({ children }: { children: React.ReactNode }) { - return ; - } - - return ( -
-
-
-

Viral LinkedIn Post Generator

-

- Fill the fields, click Generate to draft a - LinkedIn post, edit if needed, then Publish. -

-
- - {/* Form Card */} -
-
-
- Theme / Idea * - setTheme(e.target.value)} - /> -
- -
- Post Type / Category * - -
- -
- Length * - -
- -
- Tone * - -
-
- - {/* Actions */} -
- - - {!valid && ( - All fields are required. - )} -
- - {/* Generated Post */} -
- Generated LinkedIn Post -