diff --git a/backend/api/endpoints/images.py b/backend/api/endpoints/images.py
index 29f9905..f0a4f4e 100644
--- a/backend/api/endpoints/images.py
+++ b/backend/api/endpoints/images.py
@@ -11,6 +11,7 @@
 import tempfile
 import os
 import uuid
+from pathlib import Path
 
 from backend.models.images import (
     ImageGenerationRequest,
@@ -47,6 +48,83 @@
 logger = logging.getLogger(__name__)
 
 
+def normalize_filename(filename: str) -> str:
+    """
+    Normalize a filename to be safe for file systems.
+
+    Args:
+        filename: The filename to normalize
+
+    Returns:
+        A normalized filename safe for most file systems
+    """
+    if not filename:
+        return filename
+
+    # Use pathlib to handle the filename safely
+    path = Path(filename)
+
+    # Get the stem (filename without extension) and suffix (extension)
+    stem = path.stem
+    suffix = path.suffix
+
+    # Remove or replace invalid characters for most filesystems
+    # Keep alphanumeric, hyphens, underscores, and dots
+    stem = re.sub(r'[^a-zA-Z0-9_\-.]', '_', stem)
+
+    # Collapse multiple consecutive underscores
+    stem = re.sub(r'_+', '_', stem)
+
+    # Remove leading/trailing underscores and dots
+    stem = stem.strip('_.')
+
+    # Ensure the filename isn't empty
+    if not stem:
+        stem = "generated_image"
+
+    # Reconstruct the filename
+    normalized = f"{stem}{suffix}" if suffix else stem
+
+    # Ensure the filename isn't too long (most filesystems support 255 chars)
+    if len(normalized) > 200:  # Leave some room for additional suffixes
+        # Truncate the stem but keep the extension
+        max_stem_length = 200 - len(suffix)
+        stem = stem[:max_stem_length]
+        normalized = f"{stem}{suffix}" if suffix else stem
+
+    return normalized
+
+
+async def generate_filename_for_prompt(prompt: str, extension: str | None = None) -> str | None:
+    """
+    Generate a filename using the existing filename generation endpoint.
+
+    Args:
+        prompt: The prompt used for image generation
+        extension: File extension (e.g., '.png', '.jpg')
+
+    Returns:
+        The generated filename, or None if generation fails
+    """
+    try:
+        # Create request for filename generation
+        filename_request = ImageFilenameGenerateRequest(
+            prompt=prompt,
+            extension=extension
+        )
+
+        # Call the filename generation function directly
+        filename_response = generate_image_filename(filename_request)
+
+        # Normalize the generated filename
+        generated_filename = normalize_filename(filename_response.filename)
+
+        return generated_filename
+
+    except Exception:
+        return None
+
+
 @router.post("/generate", response_model=ImageGenerationResponse)
 async def generate_image(request: ImageGenerationRequest):
     """Generate an image based on the provided prompt and settings"""
@@ -74,9 +152,6 @@ async def generate_image(request: ImageGenerationRequest):
         if request.user:
             params["user"] = request.user
 
-        logger.info(
-            f"Generating image with gpt-image-1, quality: {request.quality}, size: {request.size}")
-
         # Generate image
         response = dalle_client.generate_image(**params)
 
@@ -99,10 +174,6 @@ async def generate_image(request: ImageGenerationRequest):
             input_tokens_details=input_tokens_details
         )
 
-        # Log token usage for cost tracking
-        logger.info(
-            f"Token usage - Total: {token_usage.total_tokens}, Input: {token_usage.input_tokens}, Output: {token_usage.output_tokens}")
-
         return ImageGenerationResponse(
             success=True,
             message="Refer to the imgen_model_response for details",
@@ -145,19 +216,12 @@ async def edit_image(request: ImageEditRequest):
         if request.user:
             params["user"] = request.user
 
-        # Log information about multiple images if applicable
+        # Check if organization is verified when using multiple images
         if isinstance(request.image, list):
            image_count = len(request.image)
-            logger.info(
-                f"Editing with {image_count} reference images using gpt-image-1, quality: {request.quality}, size: {request.size}")
-
-            # Check if organization is verified when using multiple images
             if image_count > 1 and not settings.OPENAI_ORG_VERIFIED:
                 logger.warning(
                     "Using multiple reference images requires organization verification")
-        else:
-            logger.info(
-                f"Editing single image using gpt-image-1, quality: {request.quality}, size: {request.size}")
 
         # Perform image editing
         response = dalle_client.edit_image(**params)
@@ -209,10 +273,6 @@ async def edit_image_upload(
 ):
     """Edit input images uploaded via multipart form data"""
     try:
-        # Log request info
-        logger.info(
-            f"Received {len(image)} image(s) for editing with prompt: {prompt}")
-
         # Validate file size for all images
         max_file_size_mb = settings.GPT_IMAGE_MAX_FILE_SIZE_MB
         temp_files = []
@@ -477,10 +537,32 @@ async def save_generated_images(
                         # Reset file pointer
                         img_file.seek(0)
 
-                        # Create filename
-                        quality_suffix = f"_{request.quality}" if request.model == "gpt-image-1" and hasattr(
-                            request, "quality") else ""
-                        filename = f"generated_image_{idx+1}{quality_suffix}.{img_format.lower()}"
+                        filename = None  # Generate an intelligent filename via the existing endpoint
+                        if request.prompt:
+                            filename = await generate_filename_for_prompt(
+                                request.prompt,
+                                f".{img_format.lower()}"
+                            )
+
+                            # Add index suffix for multiple images
+                            if filename and len(images_data) > 1:
+                                # Insert index before the extension
+                                path = Path(filename)
+                                stem = path.stem
+                                suffix = path.suffix
+                                filename = f"{stem}_{idx+1}{suffix}"
+                                logger.info(
+                                    f"Using generated filename with index: {filename}")
+                            elif filename:
+                                logger.info(f"Using generated filename: {filename}")
+
+                        # Fall back to default naming if filename generation fails
+                        if not filename:
+                            quality_suffix = f"_{request.quality}" if request.model == "gpt-image-1" and hasattr(
+                                request, "quality") else ""
+                            filename = f"generated_image_{idx+1}{quality_suffix}.{img_format.lower()}"
+                            filename = normalize_filename(filename)
+                            logger.info(f"Using fallback filename: {filename}")
 
                     elif "url" in img_data:
                         # Download image from URL
@@ -507,10 +589,27 @@ async def save_generated_images(
                         # Reset file pointer
                         img_file.seek(0)
 
-                        # Create filename
-                        quality_suffix = f"_{request.quality}" if request.model == "gpt-image-1" and hasattr(
-                            request, "quality") else ""
-                        filename = f"generated_image_{idx+1}{quality_suffix}.{ext}"
+                        filename = None  # Generate an intelligent filename via the existing endpoint
+                        if request.prompt:
+                            filename = await generate_filename_for_prompt(
+                                request.prompt,
+                                f".{ext}"
+                            )
+
+                            # Add index suffix for multiple images
+                            if filename and len(images_data) > 1:
+                                # Insert index before the extension
+                                path = Path(filename)
+                                stem = path.stem
+                                suffix = path.suffix
+                                filename = f"{stem}_{idx+1}{suffix}"
+
+                        # Fall back to default naming if filename generation fails
+                        if not filename:
+                            quality_suffix = f"_{request.quality}" if request.model == "gpt-image-1" and hasattr(
+                                request, "quality") else ""
+                            filename = f"generated_image_{idx+1}{quality_suffix}.{ext}"
+                            filename = normalize_filename(filename)
                     else:
                         logger.warning(
                             f"Unsupported image data format for image {idx+1}")
@@ -627,7 +726,6 @@ def analyze_image(req: ImageAnalyzeRequest):
             file_path += f"?{image_sas_token}"
 
         # Download the image from the URL
-        logger.info(f"Downloading image for analysis from: {file_path}")
         response = requests.get(file_path, timeout=30)
         if response.status_code != 200:
             raise HTTPException(
@@ -640,7 +738,6 @@ def analyze_image(req: ImageAnalyzeRequest):
 
     # Option 2: Process from base64 string
     elif req.base64_image:
-        logger.info("Processing image from base64 data")
         try:
             # Decode base64 to binary
             image_content = base64.b64decode(req.base64_image)
@@ -658,8 +755,6 @@ def analyze_image(req: ImageAnalyzeRequest):
         has_transparency = img.mode == 'RGBA' and 'A' in img.getbands()
 
         if has_transparency:
-            logger.info(
-                "Image has transparency, converting for analysis")
             # Create a white background
             background = Image.new(
                 'RGBA', img.size, (255, 255, 255, 255))
@@ -678,8 +773,6 @@ def analyze_image(req: ImageAnalyzeRequest):
         # This is optional but can help with very large images
         width, height = img.size
         if width > 1500 or height > 1500:
-            logger.info(
-                f"Image is large ({width}x{height}), resizing for analysis")
             # Calculate new dimensions
             max_dimension = 1500
             if width > height:
@@ -713,7 +806,6 @@ def analyze_image(req: ImageAnalyzeRequest):
         image_base64 = re.sub(r"^data:image/.+;base64,", "", image_base64)
 
         # analyze the image using the LLM
-        logger.info("Sending image to LLM for analysis")
         image_analyzer = ImageAnalyzer(llm_client, settings.LLM_DEPLOYMENT)
         insights = image_analyzer.image_chat(
             image_base64, analyze_image_system_message)
@@ -774,17 +866,12 @@ def protect_image_prompt(req: ImagePromptBrandProtectionRequest):
     try:
         if req.brands_to_protect:
             if req.protection_mode == "replace":
-                logger.info(
-                    f"Replace competitor brands of: {req.brands_to_protect}")
                 system_message = brand_protect_replace_msg.format(
                     brands=req.brands_to_protect)
             elif req.protection_mode == "neutralize":
-                logger.info(
-                    f"Neutralize competitor brands of: {req.brands_to_protect}")
                 system_message = brand_protect_neutralize_msg.format(
                     brands=req.brands_to_protect)
         else:
-            logger.info(f"No brand protection specified.")
            return ImagePromptBrandProtectionResponse(enhanced_prompt=req.original_prompt)
 
         # Ensure LLM client is available
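
To make the new helper's behavior concrete, here is a quick sanity-check sketch — not part of the diff. It assumes the `backend` package is importable from the test environment, and the input strings are invented examples:

```python
# Sketch: expected behavior of normalize_filename as defined in the hunk above.
from backend.api.endpoints.images import normalize_filename

# Unsafe characters collapse to single underscores; the extension is preserved.
assert normalize_filename("A sunny day @ the beach!.png") == "A_sunny_day_the_beach.png"

# A stem with no salvageable characters falls back to the default name.
assert normalize_filename("???.png") == "generated_image.png"

# Very long names are truncated to roughly 200 characters, keeping the extension.
assert len(normalize_filename("x" * 300 + ".png")) <= 200
```
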
diff --git a/backend/api/endpoints/videos.py b/backend/api/endpoints/videos.py
index 14371fa..02f5c3a 100644
--- a/backend/api/endpoints/videos.py
+++ b/backend/api/endpoints/videos.py
@@ -280,12 +280,28 @@ def create_video_generation_with_analysis(req: VideoGenerationWithAnalysisReques
                     from backend.core.azure_storage import AzureBlobStorageService
                     from azure.storage.blob import ContentSettings
 
-                    # Generate the final filename for gallery
-                    final_filename = f"{req.prompt.replace(' ', '_')}_{generation_id}.mp4"
-
                     # Create Azure storage service
                     azure_service = AzureBlobStorageService()
 
+                    # Generate the base filename
+                    base_filename = f"{req.prompt.replace(' ', '_')}_{generation_id}.mp4"
+
+                    # Extract folder path from request metadata and normalize it
+                    folder_path = req.metadata.get(
+                        'folder') if req.metadata else None
+                    final_filename = base_filename
+
+                    if folder_path and folder_path != 'root':
+                        # Use Azure service's normalize_folder_path method for consistency
+                        normalized_folder = azure_service.normalize_folder_path(
+                            folder_path)
+                        final_filename = f"{normalized_folder}{base_filename}"
+                        logger.info(
+                            f"Uploading video to folder: {normalized_folder}")
+                    else:
+                        logger.info(
+                            "Uploading video to root directory")
+
                     # Upload to Azure Blob Storage
                     container_client = azure_service.blob_service_client.get_container_client(
                         "videos")
@@ -305,6 +321,11 @@ def create_video_generation_with_analysis(req: VideoGenerationWithAnalysisReques
                         "upload_date": datetime.now().isoformat()
                     }
 
+                    # Add folder path to metadata if specified
+                    if folder_path and folder_path != 'root':
+                        upload_metadata["folder_path"] = azure_service.normalize_folder_path(
+                            folder_path)
+
                     # Read the file and upload with metadata
                     with open(downloaded_path, 'rb') as video_file:
                         blob_client.upload_blob(
@@ -318,6 +339,9 @@ def create_video_generation_with_analysis(req: VideoGenerationWithAnalysisReques
                     blob_url = blob_client.url
                     logger.info(
                         f"Uploaded video to gallery: {blob_url}")
+                    if folder_path and folder_path != 'root':
+                        logger.info(
+                            f"Video uploaded to folder '{folder_path}' with normalized path '{azure_service.normalize_folder_path(folder_path)}'")
 
                 except Exception as upload_error:
                     logger.warning(
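
The folder-aware naming above concatenates `normalize_folder_path(folder_path)` directly with the base filename, so it relies on the normalized path being either empty (root) or ending in exactly one slash. A hypothetical sketch of that contract — the real implementation lives in `AzureBlobStorageService` and is not shown in this diff:

```python
# Hypothetical sketch of the contract the upload code above relies on.
def normalize_folder_path(folder_path: str | None) -> str:
    if not folder_path or folder_path == "root":
        return ""
    # Strip stray slashes and guarantee exactly one trailing slash
    return folder_path.strip("/") + "/"

assert normalize_folder_path("campaigns/summer") == "campaigns/summer/"
assert normalize_folder_path(None) == ""
# f"{normalize_folder_path(folder)}{base_filename}" is then a valid blob
# name in both the folder case and the root case.
```
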
diff --git a/backend/core/azure_storage.py b/backend/core/azure_storage.py
index 67a620d..4debeb5 100644
--- a/backend/core/azure_storage.py
+++ b/backend/core/azure_storage.py
@@ -259,9 +259,6 @@ async def upload_asset(self, file: UploadFile, asset_type: str = "image",
         # Determine container based on asset type
         container_name = self.image_container if asset_type == "image" else self.video_container
 
-        # Generate a unique ID for the file
-        file_id = str(uuid.uuid4())
-
         # Get file extension and determine content type
         _, ext = os.path.splitext(file.filename)
         content_type = self._get_content_type(ext, asset_type)
@@ -269,14 +266,31 @@ async def upload_asset(self, file: UploadFile, asset_type: str = "image",
         # Normalize folder path
         normalized_folder_path = self.normalize_folder_path(folder_path)
 
-        # Create blob name with optional folder path and UUID
-        blob_name = f"{normalized_folder_path}{file_id}{ext}"
-
-        # Get container client
-        container_client = self.blob_service_client.get_container_client(
-            container_name)
-
-        # Create blob client
+        # Get container client (also used below to check for name conflicts)
+        container_client = self.blob_service_client.get_container_client(
+            container_name)
+
+        # Use the provided filename if available, otherwise generate a UUID
+        if file.filename and file.filename.strip():
+            # Remove the extension from the filename to avoid double extensions
+            filename_without_ext = os.path.splitext(file.filename)[0]
+            # Create blob name with the provided filename
+            blob_name = f"{normalized_folder_path}{filename_without_ext}{ext}"
+            file_id = filename_without_ext  # For backward compatibility in response
+
+            # If the blob already exists, append a UUID suffix to make it unique
+            blob_client = container_client.get_blob_client(blob_name)
+            if blob_client.exists():
+                # Use first 8 chars of UUID
+                unique_suffix = str(uuid.uuid4())[:8]
+                blob_name = f"{normalized_folder_path}{filename_without_ext}_{unique_suffix}{ext}"
+                file_id = f"{filename_without_ext}_{unique_suffix}"
+        else:
+            # Fall back to a UUID if no filename was provided
+            file_id = str(uuid.uuid4())
+            blob_name = f"{normalized_folder_path}{file_id}{ext}"
+
+        # Create blob client for the final blob name
         blob_client = container_client.get_blob_client(blob_name)
 
         # Set content settings
@@ -323,9 +337,6 @@ async def upload_asset(self, file: UploadFile, asset_type: str = "image",
                 "folder_path": normalized_folder_path
             }
-        except Exception as e:
-            import traceback
-            print(f"Azure upload error: {str(e)}")
-            print(f"Error trace: {traceback.format_exc()}")
+        except Exception:
             raise
 
     def get_asset_metadata(self, blob_name: str, container_name: str) -> Optional[Dict[str, str]]:
@@ -385,9 +396,6 @@ def update_asset_metadata(self, blob_name: str, container_name: str, metadata: D
         except ResourceNotFoundError:
             return False
-        except Exception as e:
-            import traceback
-            print(f"Metadata update error: {str(e)}")
-            print(f"Error trace: {traceback.format_exc()}")
+        except Exception:
             return False
 
     def _get_content_type(self, extension: str, asset_type: str) -> str:
@@ -532,5 +540,4 @@ def list_folders(self, container_name: str) -> List[str]:
             # Convert to sorted list
             return sorted(list(folders))
-        except Exception as e:
-            print(f"Error listing folders: {str(e)}")
+        except Exception:
             return []
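
The conflict handling in `upload_asset` reduces to "try the friendly name first, append a short random suffix on collision". A standalone sketch of that pattern against the `azure-storage-blob` client follows; the helper name is illustrative, not from the codebase. Note that `exists()` followed by a later upload is not atomic — a strict guarantee would instead call `upload_blob(..., overwrite=False)` and retry on `ResourceExistsError`:

```python
import uuid

from azure.storage.blob import ContainerClient


def unique_blob_name(container_client: ContainerClient, folder: str, stem: str, ext: str) -> str:
    """Return folder+stem+ext, appending an 8-char UUID suffix if the blob already exists."""
    blob_name = f"{folder}{stem}{ext}"
    if container_client.get_blob_client(blob_name).exists():
        suffix = str(uuid.uuid4())[:8]
        blob_name = f"{folder}{stem}_{suffix}{ext}"
    return blob_name
```
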
factors:", scaleX.toFixed(2), "x", scaleY.toFixed(2)); // Initialize all pixels as opaque black (areas to preserve) for (let i = 0; i < scaledMaskData.data.length; i += 4) { @@ -118,17 +111,12 @@ export default function GenerateForm({ } } - console.log("Total transparent pixels in final mask:", transparentPixels); - console.log("Transparent pixel percentage:", ((transparentPixels / (originalImage.width * originalImage.height)) * 100).toFixed(2) + "%"); - // Put the modified data back ctx.putImageData(scaledMaskData, 0, 0); if (transparentPixels === 0) { console.error("ERROR: Final mask has no transparent pixels! API will not edit any part of the image."); } - - console.log("----------------------------------------"); return properMaskCanvas; }; @@ -157,7 +145,6 @@ export default function GenerateForm({ // Check if the original image is too large and needs optimization if (originalImage.file.size > 5 * 1024 * 1024) { // If larger than 5MB - console.log(`Original image is large (${(originalImage.file.size / 1024 / 1024).toFixed(2)}MB), optimizing...`); // Create a canvas to resize the image const canvas = document.createElement('canvas'); @@ -201,8 +188,7 @@ export default function GenerateForm({ lastModified: Date.now() }); - // Log the optimization results - console.log(`Optimized image size from ${(originalImage.file.size / 1024 / 1024).toFixed(2)}MB to ${(blob.size / 1024 / 1024).toFixed(2)}MB`); + // Add to the form data formData.append('image', optimizedFile); @@ -220,7 +206,6 @@ export default function GenerateForm({ }); } else { // If context fails, use original - console.log('Context 2D not available, using original image'); formData.append('image', originalImage.file); } } else { diff --git a/frontend/app/edit-image/components/createDebugMask.ts b/frontend/app/edit-image/components/createDebugMask.ts index 7ca200a..123d63a 100644 --- a/frontend/app/edit-image/components/createDebugMask.ts +++ b/frontend/app/edit-image/components/createDebugMask.ts @@ -7,18 +7,10 @@ export function createAndShowDebugMask( originalWidth: number, originalHeight: number ): void { - console.log("Creating debug mask for visualization..."); - console.log("Original drawing dimensions:", maskCanvas.width, "x", maskCanvas.height); - console.log("Target image dimensions:", originalWidth, "x", originalHeight); - // Check aspect ratios const drawingAspectRatio = maskCanvas.width / maskCanvas.height; const targetAspectRatio = originalWidth / originalHeight; - console.log("Drawing aspect ratio:", drawingAspectRatio.toFixed(2)); - console.log("Target aspect ratio:", targetAspectRatio.toFixed(2)); - console.log("Aspect ratio difference:", Math.abs(drawingAspectRatio - targetAspectRatio).toFixed(2)); - // Create a debug mask with proper dimensions const debugMaskUrl = createDebugMask(maskCanvas, originalWidth, originalHeight); if (!debugMaskUrl) return; @@ -26,7 +18,6 @@ export function createAndShowDebugMask( // Open in new tab const win = window.open(); if (!win) { - console.error("Could not open debug window"); return; } diff --git a/frontend/app/gallery/page.tsx b/frontend/app/gallery/page.tsx index 34b8cad..9b31b58 100644 --- a/frontend/app/gallery/page.tsx +++ b/frontend/app/gallery/page.tsx @@ -151,9 +151,9 @@ export default function GalleryPage() { }, []); // Function to handle video deletion - const handleVideoDeleted = (deletedVideoId: string) => { - // Remove the deleted video from the state - setVideos(prevVideos => prevVideos.filter(video => video.id !== deletedVideoId)); + const handleVideoDeleted = 
+    // Remove the deleted video from the state using the unique video name (blob name)
+    setVideos(prevVideos => prevVideos.filter(video => video.name !== deletedVideoName));
 
     // If we've deleted a video, we might want to load another one to replace it
     if (hasMore && videos.length < limit * 2) {
@@ -186,7 +192,12 @@ export default function GalleryPage() {
 
   // Function to generate sample tags for videos
   const generateTagsForVideo = (video: VideoMetadata, index: number): string[] => {
-    // If the video already has tags, use those
+    // First, check if we have real analysis tags
+    if (video.analysis?.tags && video.analysis.tags.length > 0) {
+      return video.analysis.tags;
+    }
+
+    // If the video already has tags from other sources, use those
     if (video.tags && video.tags.length > 0) {
       return video.tags;
     }
@@ -203,44 +208,8 @@ export default function GalleryPage() {
       }
     }
 
-    // A pool of potential tags
-    const tagPool = [
-      "AI Generated", "Landscape", "Portrait", "Nature", "Urban",
-      "Abstract", "People", "Architecture", "Animals", "Technology",
-      "Cinematic", "Outdoors", "Indoor", "Animation", "Experimental"
-    ];
-
-    // Deterministic selection based on the video properties
-    const selectedTags: string[] = [];
-
-    // Add "AI Generated" tag to all videos
-    selectedTags.push("AI Generated");
-
-    // Add orientation tags based on title or description
-    if (video.title.toLowerCase().includes("landscape") ||
-      (video.description && video.description.toLowerCase().includes("landscape"))) {
-      selectedTags.push("Landscape");
-    } else if (video.title.toLowerCase().includes("portrait") ||
-      (video.description && video.description.toLowerCase().includes("portrait"))) {
-      selectedTags.push("Portrait");
-    } else {
-      // Use the index to select a tag if none found in title/description
-      selectedTags.push(index % 2 === 0 ? "Landscape" : "Portrait");
-    }
-
-    // Add a content tag based on index
-    const contentIndex = (index * 3) % (tagPool.length - 2) + 2; // Skip the first two tags (AI Generated & Landscape/Portrait)
-    selectedTags.push(tagPool[contentIndex]);
-
-    // Randomly add an extra tag for some videos
-    if (index % 3 === 0) {
-      const extraIndex = (index * 7) % (tagPool.length - 2) + 2;
-      if (tagPool[extraIndex] && !selectedTags.includes(tagPool[extraIndex])) {
-        selectedTags.push(tagPool[extraIndex]);
-      }
-    }
-
-    return selectedTags;
+    // If no real tags are available, return empty array instead of dummy tags
+    return [];
   };
 
 
@@ -329,9 +298,12 @@ export default function GalleryPage() {
         ) : videos.length > 0 ? (
           videos.map((video, index) => (
handleVideoDeleted(video.name)} autoPlay={autoPlay} tags={generateTagsForVideo(video, index)} />
diff --git a/frontend/app/layout.tsx b/frontend/app/layout.tsx
index e0b4f5d..8dc26b3 100644
--- a/frontend/app/layout.tsx
+++ b/frontend/app/layout.tsx
@@ -9,6 +9,7 @@ import { Separator } from "@/components/ui/separator";
 import { VideoQueueProvider } from "@/context/video-queue-context";
 import { JobsProvider } from "@/context/jobs-context";
 import { ImageSettingsProvider } from "@/context/image-settings-context";
+import { FolderProvider } from "@/context/folder-context";
 import { VideoQueueClient } from "@/components/video-queue-client";
 import { RefreshJobsButton } from "@/components/refresh-jobs-button";
 import { Toaster } from "@/components/ui/sonner";
@@ -47,6 +48,7 @@ export default function RootLayout({ children }: RootLayoutProps) {
+
           {/* Main layout with sidebar */}
            {/* Content area with sidebar */}
@@ -82,6 +84,7 @@ export default function RootLayout({ children }: RootLayoutProps) {
+
diff --git a/frontend/app/new-image/page.tsx b/frontend/app/new-image/page.tsx
index b11f75b..7c3b3ab 100644
--- a/frontend/app/new-image/page.tsx
+++ b/frontend/app/new-image/page.tsx
@@ -471,7 +471,7 @@ function NewImagePageContent() {
            className="columns-1 sm:columns-2 md:columns-2 lg:columns-3 gap-4 space-y-4"
          >
            {images.map((image, index) => (
-
+
{
  // Create a callback function to refresh the gallery
  const refreshGalleryCallback = () => {
-    console.log("Gallery refresh callback triggered, reloading videos");
    // Check if we're already loading to avoid duplicate refreshes
    if (!loading && !isRefreshing) {
      loadVideos(true, true);
    }
  };
@@ -292,26 +291,10 @@ function NewVideoPageContent() {
      setTimeout(() => {
        // Don't refresh unnecessarily if we just refreshed or are loading
        if (!loading && !isRefreshing) {
-          console.log("Videos uploaded - refreshing gallery");
-
          // Do a full refresh of the gallery
          loadVideos(true);
 
-          // Show success notification after videos load
-          toast.success(`Videos added to gallery`, {
-            description: "Your videos are now visible in the gallery",
-            duration: 3000,
-            action: {
-              label: "View",
-              onClick: () => {
-                // Scroll to the top of the gallery to show new content
-                const galleryContainer = document.querySelector('.gallery-container');
-                if (galleryContainer) {
-                  galleryContainer.scrollTo({ top: 0, behavior: 'smooth' });
-                }
-              }
-            }
-          });
+          // Don't show additional toast here - the video queue context already shows success notification
        }
      }, 1000);
    }
@@ -321,9 +304,9 @@ function NewVideoPageContent() {
  // and can be refreshed manually or with auto-refresh
 
  // Function to handle video deletion
-  const handleVideoDeleted = (deletedVideoId: string) => {
-    // Remove the deleted video from the state
-    setVideos(prevVideos => prevVideos.filter(video => video.id !== deletedVideoId));
+  const handleVideoDeleted = (deletedVideoName: string) => {
+    // Remove the deleted video from the state using the unique video name (blob name)
+    setVideos(prevVideos => prevVideos.filter(video => video.name !== deletedVideoName));
 
    // If we've deleted a video, we might want to load another one to replace it
    if (hasMore && videos.length < limit * 2) {
@@ -356,7 +339,12 @@ function NewVideoPageContent() {
 
  // Function to generate sample tags for videos
  const generateTagsForVideo = (video: VideoMetadata, index: number): string[] => {
-    // If the video already has tags, use those
+    // First, check if we have real analysis tags
+    if (video.analysis?.tags && video.analysis.tags.length > 0) {
+      return video.analysis.tags;
+    }
+
+    // If the video already has tags from other sources, use those
    if (video.tags && video.tags.length > 0) {
      return video.tags;
    }
@@ -373,44 +361,8 @@ function NewVideoPageContent() {
      }
    }
 
-    // A pool of potential tags
-    const tagPool = [
-      "AI Generated", "Landscape", "Portrait", "Nature", "Urban",
-      "Abstract", "People", "Architecture", "Animals", "Technology",
-      "Cinematic", "Outdoors", "Indoor", "Animation", "Experimental"
-    ];
-
-    // Deterministic selection based on the video properties
-    const selectedTags: string[] = [];
-
-    // Add "AI Generated" tag to all videos
-    selectedTags.push("AI Generated");
-
-    // Add orientation tags based on title or description
-    if (video.title.toLowerCase().includes("landscape") ||
-      (video.description && video.description.toLowerCase().includes("landscape"))) {
-      selectedTags.push("Landscape");
-    } else if (video.title.toLowerCase().includes("portrait") ||
-      (video.description && video.description.toLowerCase().includes("portrait"))) {
-      selectedTags.push("Portrait");
-    } else {
-      // Use the index to select a tag if none found in title/description
-      selectedTags.push(index % 2 === 0 ? "Landscape" : "Portrait");
"Landscape" : "Portrait"); - } - - // Add a content tag based on index - const contentIndex = (index * 3) % (tagPool.length - 2) + 2; // Skip the first two tags (AI Generated & Landscape/Portrait) - selectedTags.push(tagPool[contentIndex]); - - // Randomly add an extra tag for some videos - if (index % 3 === 0) { - const extraIndex = (index * 7) % (tagPool.length - 2) + 2; - if (tagPool[extraIndex] && !selectedTags.includes(tagPool[extraIndex])) { - selectedTags.push(tagPool[extraIndex]); - } - } - - return selectedTags; + // If no real tags are available, return empty array instead of dummy tags + return []; }; // Group videos into columns for masonry layout @@ -480,7 +432,6 @@ function NewVideoPageContent() { variants: string; modality: string; analyzeVideo: boolean; - mode: string; brandsProtection: string; imageModel: string; hd: boolean; @@ -503,21 +454,17 @@ function NewVideoPageContent() { setIsGenerating(true); + // Show immediate feedback to the user + const toastId = toast.loading(`Creating ${settings.variants} video${parseInt(settings.variants) > 1 ? 's' : ''}...`, { + description: `${settings.aspectRatio}, ${settings.duration} duration - this may take 1-2 minutes` + }); + try { let generationPrompt = settings.prompt; + let brandProtectionApplied = false; // Apply brand protection if enabled from global settings if (imageSettings.settings.brandsProtection !== "off" && imageSettings.settings.brandsList.length > 0) { - // Log that protection is active - console.log("🛡️ Brand Protection Activated:", { - mode: imageSettings.settings.brandsProtection, - brands: imageSettings.settings.brandsList - }); - - toast.info("Brand protection activated", { - description: `Applying ${imageSettings.settings.brandsProtection} protection for ${imageSettings.settings.brandsList.length} brand${imageSettings.settings.brandsList.length > 1 ? 's' : ''}...` - }); - try { // Call the brand protection API generationPrompt = await protectImagePrompt( @@ -526,41 +473,18 @@ function NewVideoPageContent() { imageSettings.settings.brandsProtection ); - // Log the resulting prompt if (generationPrompt !== settings.prompt) { - console.log("Original prompt:", settings.prompt); - console.log("Protected prompt:", generationPrompt); - - toast.success("Brand protection applied", { - description: "The prompt has been modified for brand safety" - }); - } else { - toast.info("Brand protection processed", { - description: "No changes were needed to protect the specified brands" - }); + brandProtectionApplied = true; } } catch (error) { console.error('Error applying brand protection:', error); - toast.error("Brand protection failed", { - description: "Using original prompt instead" - }); - // Fallback to original prompt on error + // Don't show a separate error toast for brand protection - just log and continue + // The main generation will still proceed with the original prompt generationPrompt = settings.prompt; } } - // Show generation started toast - toast(`Starting ${settings.modality} generation with your prompt...`); - - if (settings.mode === "dev") { - // In dev mode, just simulate generation - setTimeout(() => { - setIsGenerating(false); - toast.success(`Your ${settings.modality} has been generated`, { - description: "Development mode is using placeholder videos." 
- }); - }, 3000); - } else { + { // For real video generation try { // Convert string values to numbers for type compatibility @@ -573,7 +497,7 @@ function NewVideoPageContent() { brandsProtection: settings.brandsProtection, brandsList: settings.brandsList, analyzeVideo: settings.analyzeVideo, // Pass the analysis setting - mode: settings.mode // Pass the mode setting + folder: settings.folder // Pass the folder setting }; // Add to queue - this will create the job in the backend @@ -585,29 +509,8 @@ function NewVideoPageContent() { [jobId]: true })); - // Show detailed toast about the generation process - toast.success("Video generation queued", { - description: `Creating ${videoSettings.variants} video variant${videoSettings.variants > 1 ? 's' : ''} with ${videoSettings.aspectRatio} aspect ratio`, - duration: 5000, - action: { - label: "View Status", - onClick: () => { - // Scroll to the top to show the active generation badge - const galleryContainer = document.querySelector('.gallery-container'); - if (galleryContainer) { - galleryContainer.scrollTo({ top: 0, behavior: 'smooth' }); - } - } - } - }); - - // Provide info about when to expect results - setTimeout(() => { - toast.info("Video generation in progress", { - description: `This will take about 1-2 minutes. The gallery will automatically update when done.`, - duration: 8000 - }); - }, 2000); + // Dismiss the loading toast - the job is now in progress + toast.dismiss(toastId); // No need to reset lastCompletedJobId now that we're tracking in jobsInProgress @@ -616,6 +519,7 @@ function NewVideoPageContent() { } catch (error) { console.error("Error starting video generation:", error); toast.error("Could not connect to the backend API", { + id: toastId, description: "Please try again later" }); setIsGenerating(false); @@ -625,6 +529,7 @@ function NewVideoPageContent() { console.error("Error during generation:", error); setIsGenerating(false); toast.error("An error occurred while generating the video", { + id: toastId, description: "Please try again later" }); } @@ -807,7 +712,7 @@ function NewVideoPageContent() { const isLarge = (videoIndex * 3 + columnIndex) % 5 === 0; return ( -
+
handleVideoDeleted(video.id)} + onDelete={() => handleVideoDeleted(video.name)} onClick={() => handleVideoClick(video)} autoPlay={autoPlay} /> diff --git a/frontend/components/ImageCreationContainer.tsx b/frontend/components/ImageCreationContainer.tsx index 35f5ca5..4a5275f 100644 --- a/frontend/components/ImageCreationContainer.tsx +++ b/frontend/components/ImageCreationContainer.tsx @@ -140,11 +140,8 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC // Apply brand protection if enabled let generationPrompt = originalPrompt; + let brandProtectionApplied = false; if (newSettings.brandsProtection !== "off" && newSettings.brandsList && newSettings.brandsList.length > 0) { - toast.info("Brand protection activated", { - description: `Applying ${newSettings.brandsProtection} protection for ${newSettings.brandsList.length} brand${newSettings.brandsList.length > 1 ? 's' : ''}...` - }); - try { // Call the brand protection API generationPrompt = await protectImagePrompt( @@ -155,16 +152,8 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC // Log the difference if in debug mode if (generationPrompt !== originalPrompt) { - console.log("Original prompt:", originalPrompt); - console.log("Protected prompt:", generationPrompt); - toast.success("Brand protection applied", { - description: "The prompt has been modified for brand safety" - }); - } else { - toast.info("Brand protection processed", { - description: "No changes were needed to protect the specified brands" - }); + brandProtectionApplied = true; } } catch (error) { console.error('Error applying brand protection:', error); @@ -181,9 +170,9 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC // If source images are provided, use the edit endpoint if (newSettings.sourceImages && newSettings.sourceImages.length > 0) { - // Show toast for image editing - toast.info("Image editing started", { - description: `Editing ${newSettings.sourceImages.length} image${newSettings.sourceImages.length > 1 ? 's' : ''} with prompt: "${originalPrompt.substring(0, 50)}${originalPrompt.length > 50 ? '...' : ''}"`, + // Show single consolidated toast for image editing + const editingToast = toast.loading("Editing images...", { + description: `Processing ${newSettings.sourceImages.length} image${newSettings.sourceImages.length > 1 ? 's' : ''} with your prompt${brandProtectionApplied ? ' (brand protection applied)' : ''}`, }); // Call the image edit API with the protected prompt @@ -195,13 +184,15 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC newSettings.quality // Quality parameter ); + // Update the loading toast to success toast.success("Image editing completed", { - description: "Processing edited images..." + id: editingToast, + description: `Successfully edited ${newSettings.variations} image${newSettings.variations > 1 ? 's' : ''}` }); } else { - // Show toast for image generation - toast.info("Image generation started", { - description: `Generating image with prompt: "${originalPrompt.substring(0, 50)}${originalPrompt.length > 50 ? '...' : ''}"`, + // Show single consolidated toast for image generation + const generatingToast = toast.loading("Generating images...", { + description: `Creating ${newSettings.variations} image${newSettings.variations > 1 ? 's' : ''} with your prompt${brandProtectionApplied ? 
' (brand protection applied)' : ''}`, }); // Call the image generation API with the protected prompt @@ -215,8 +206,10 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC newSettings.quality // Quality parameter ); + // Update the loading toast to success toast.success("Image generation completed", { - description: "Processing generated images..." + id: generatingToast, + description: `Successfully generated ${newSettings.variations} image${newSettings.variations > 1 ? 's' : ''}` }); } @@ -238,11 +231,7 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC ); if (hasBase64Images) { - toast.info("AI analysis started", { - description: "Analyzing generated images before saving..." - }); - - // Process each image and collect analysis results + // Process each image and collect analysis results (silently) const analysisPromises = response.imgen_model_response.data.map( async (imageData: ImageData, idx: number) => { if (imageData.b64_json) { @@ -265,15 +254,10 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC ); const analysisResults = await Promise.all(analysisPromises); - const successCount = analysisResults.filter(r => r && r.analysis).length; successfulAnalysis = analysisResults.filter(r => r && r.analysis); // Assign here - if (successCount > 0) { - toast.success(`AI analysis completed`, { - description: `Successfully analyzed ${successCount} of ${response.imgen_model_response.data.length} images` - }); - - // Store analysis results to use when saving + // Store analysis results to use when saving (no toast needed) + if (successfulAnalysis.length > 0) { setGenerationResponseData((prev: GenerationResponse | null) => ({ ...(prev || {}), analysisResults: successfulAnalysis @@ -321,9 +305,9 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC try { setIsUploading(true); - // Show toast for upload process - toast.info("Uploading images", { - description: `Saving generated images${folder ? ' to ' + folder : ' to root folder'}...` + // Show consolidated saving toast + const savingToast = toast.loading("Saving images...", { + description: `Uploading to ${folder || 'root folder'}${preAnalysisResults && preAnalysisResults.length > 0 ? ' with AI analysis' : ''}...` }); // Add brand protection metadata if available @@ -340,9 +324,9 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC try { enhancedResponse.metadata = { ...(enhancedResponse.metadata || {}), - brand_protection_mode: generationResponse.brandProtection.mode, - protected_brands: generationResponse.brandProtection.brands.join(', '), - protected_prompt: generationResponse.brandProtection.protectedPrompt + brand_protection_mode: generationResponse.brandProtection!.mode, + protected_brands: generationResponse.brandProtection!.brands.join(', '), + protected_prompt: generationResponse.brandProtection!.protectedPrompt }; } catch (error) { console.error("Error adding brand protection metadata:", error); @@ -361,11 +345,12 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC imageSize // Pass imageSize here ); - // Show success message with details + // Update the loading toast to success toast.success(`${saveResponse.total_saved} images saved`, { + id: savingToast, description: folder - ? `Images have been saved to folder: ${folder}` - : "Images have been saved to root folder" + ? 
`Successfully saved to folder: ${folder}` + : "Successfully saved to root folder" }); // If we have pre-computed analysis results passed directly, apply them @@ -373,9 +358,6 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC saveResponse.saved_images && saveResponse.saved_images.length > 0) { setIsAnalyzing(true); - toast.info("Applying AI analysis", { - description: "Updating metadata with pre-computed analysis..." - }); let successCount = 0; @@ -419,11 +401,7 @@ export function ImageCreationContainer({ className = "", onImagesSaved }: ImageC } } - if (successCount > 0) { - toast.success(`AI analysis applied`, { - description: `Successfully updated metadata for ${successCount} of ${saveResponse.saved_images.length} images` - }); - } + // AI analysis is applied silently - no additional toast needed setIsAnalyzing(false); } diff --git a/frontend/components/ImageOverlay.tsx b/frontend/components/ImageOverlay.tsx index 90a44be..60702e4 100644 --- a/frontend/components/ImageOverlay.tsx +++ b/frontend/components/ImageOverlay.tsx @@ -23,6 +23,7 @@ import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group"; import { Input } from "@/components/ui/input"; import { useImageSettings } from "@/context/image-settings-context"; import { useTheme } from "next-themes"; +import { useFolderContext } from "@/context/folder-context"; interface ImageOverlayProps { onGenerate: (settings: { @@ -82,6 +83,7 @@ export function ImageOverlay({ // Add theme context const { theme, resolvedTheme } = useTheme(); const [isDarkTheme, setIsDarkTheme] = useState(false); + const { refreshFolders } = useFolderContext(); // Move theme detection to useEffect to prevent hydration mismatch useEffect(() => { @@ -247,6 +249,9 @@ export function ImageOverlay({ if (onFolderCreated) { onFolderCreated(newFolderPath); } + + // Trigger sidebar refresh + refreshFolders(); } } catch (error) { console.error("Error creating folder:", error); @@ -281,6 +286,9 @@ export function ImageOverlay({ // Update the parent component with the full folder list onFolderCreated(result.folders); + // Trigger sidebar refresh + refreshFolders(); + toast.success("Folders refreshed", { description: `${result.folders.length} folders available` }); diff --git a/frontend/components/VideoCard.tsx b/frontend/components/VideoCard.tsx index 838ff00..b5e437e 100644 --- a/frontend/components/VideoCard.tsx +++ b/frontend/components/VideoCard.tsx @@ -40,7 +40,7 @@ export function VideoCard({ description, aspectRatio = "16:9", className, - tags = ["AI Generated", "Landscape"], + tags, blobName, onDelete, onClick, @@ -105,7 +105,6 @@ export function VideoCard({ // Don't try to load folder markers as videos if (src.includes('.folder')) { - console.log("Skipping folder marker:", src); setIsLoading(false); return; } @@ -148,7 +147,6 @@ export function VideoCard({ const handleError = () => { setIsLoading(false); - console.error("Error loading video:", src); }; video.addEventListener('loadedmetadata', handleMetadata); diff --git a/frontend/components/VideoOverlay.tsx b/frontend/components/VideoOverlay.tsx index 269e534..fd29d0c 100644 --- a/frontend/components/VideoOverlay.tsx +++ b/frontend/components/VideoOverlay.tsx @@ -20,6 +20,7 @@ import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group"; import { enhancePrompt, createFolder, MediaType, fetchFolders } from "@/services/api"; import { toast } from "sonner"; import { useTheme } from "next-themes"; +import { useFolderContext } from "@/context/folder-context"; 
diff --git a/frontend/components/VideoOverlay.tsx b/frontend/components/VideoOverlay.tsx
index 269e534..fd29d0c 100644
--- a/frontend/components/VideoOverlay.tsx
+++ b/frontend/components/VideoOverlay.tsx
@@ -20,6 +20,7 @@ import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
 import { enhancePrompt, createFolder, MediaType, fetchFolders } from "@/services/api";
 import { toast } from "sonner";
 import { useTheme } from "next-themes";
+import { useFolderContext } from "@/context/folder-context";
 import { Input } from "@/components/ui/input";
 
 interface VideoOverlayProps {
@@ -31,7 +32,6 @@ interface VideoOverlayProps {
    variants: string;
    modality: string;
    analyzeVideo: boolean;
-    mode: string;
    brandsProtection: string;
    imageModel: string;
    hd: boolean;
@@ -67,6 +67,7 @@ export function VideoOverlay({
  // Add theme context
  const { theme, resolvedTheme } = useTheme();
  const [isDarkTheme, setIsDarkTheme] = useState(false);
+  const { refreshFolders } = useFolderContext();
 
  // Move theme detection to useEffect to prevent hydration mismatch
  useEffect(() => {
@@ -82,8 +83,7 @@ export function VideoOverlay({
  const [prompt, setPrompt] = useState("");
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  const [modality, setModality] = useState("text-to-video");
-  // eslint-disable-next-line @typescript-eslint/no-unused-vars
-  const [mode, setMode] = useState<"dev" | "sora">("sora");
+
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  const [model, setModel] = useState("sora-v1.1");
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
@@ -220,6 +220,9 @@ export function VideoOverlay({
      if (onFolderCreated) {
        onFolderCreated(newFolderPath);
      }
+
+      // Trigger sidebar refresh
+      refreshFolders();
    }
  } catch (error) {
    console.error("Error creating folder:", error);
@@ -254,6 +257,9 @@ export function VideoOverlay({
      // Update the parent component with the full folder list
      onFolderCreated(result.folders);
 
+      // Trigger sidebar refresh
+      refreshFolders();
+
      toast.success("Folders refreshed", {
        description: `${result.folders.length} folders available`
      });
@@ -293,7 +299,6 @@ export function VideoOverlay({
      variants,
      modality,
      analyzeVideo,
-      mode,
      brandsProtection,
      imageModel,
      hd,
diff --git a/frontend/components/app-sidebar.tsx b/frontend/components/app-sidebar.tsx
index 4cab29f..80b0475 100644
--- a/frontend/components/app-sidebar.tsx
+++ b/frontend/components/app-sidebar.tsx
@@ -8,6 +8,7 @@ import { ThemeToggle } from "@/components/theme-toggle";
 import { useEffect, useState } from "react";
 import { fetchFolders, MediaType } from "@/services/api";
 import { useRouter, usePathname, useSearchParams } from "next/navigation";
+import { useFolderContext } from "@/context/folder-context";
 import { motion } from "framer-motion";
 
 import {
@@ -89,13 +90,14 @@ export function AppSidebar() {
  const pathname = usePathname();
  const searchParams = useSearchParams();
  const currentFolderParam = searchParams.get('folder');
+  const { folderRefreshTrigger } = useFolderContext();
 
  // Only render logo after mounted on client to avoid hydration mismatch
  useEffect(() => {
    setMounted(true);
  }, []);
 
-  // Fetch folders on component mount
+  // Fetch folders on component mount and when refresh is triggered
  useEffect(() => {
    const loadImageFolders = async () => {
      setIsImageFoldersLoading(true);
@@ -123,7 +125,7 @@ export function AppSidebar() {
 
    loadImageFolders();
    loadVideoFolders();
-  }, []);
+  }, [folderRefreshTrigger]); // Re-run when folders are created/updated
 
  // Determine logo based on theme
  const logoSrc = mounted && theme === "dark"
diff --git a/frontend/context/folder-context.tsx b/frontend/context/folder-context.tsx
new file mode 100644
index 0000000..60c50af
--- /dev/null
+++ b/frontend/context/folder-context.tsx
@@ -0,0 +1,32 @@
+"use client"
+
+import React, { createContext, useContext, useState, useCallback } from 'react';
+
+interface FolderContextType {
+  refreshFolders: () => void;
+  folderRefreshTrigger: number;
+}
+
+const FolderContext = createContext<FolderContextType | undefined>(undefined);
+
+export function FolderProvider({ children }: { children: React.ReactNode }) {
+  const [folderRefreshTrigger, setFolderRefreshTrigger] = useState(0);
+
+  const refreshFolders = useCallback(() => {
+    setFolderRefreshTrigger(prev => prev + 1);
+  }, []);
+
+  return (
+    <FolderContext.Provider value={{ refreshFolders, folderRefreshTrigger }}>
+      {children}
+    </FolderContext.Provider>
+  );
+}
+
+export function useFolderContext() {
+  const context = useContext(FolderContext);
+  if (context === undefined) {
+    throw new Error('useFolderContext must be used within a FolderProvider');
+  }
+  return context;
+}
\ No newline at end of file
diff --git a/frontend/context/video-queue-context.tsx b/frontend/context/video-queue-context.tsx
index 97d45f6..e9d3105 100644
--- a/frontend/context/video-queue-context.tsx
+++ b/frontend/context/video-queue-context.tsx
@@ -34,7 +34,6 @@ export function unregisterGalleryRefreshCallback(callback: RefreshCallback) {
 
 // Trigger all registered callbacks
 function notifyGalleryRefreshNeeded() {
-  console.log(`Notifying ${refreshCallbacks.length} gallery components to refresh`);
   refreshCallbacks.forEach(callback => {
     try {
       callback();
@@ -55,8 +54,8 @@ export interface VideoQueueItem {
   uploadStarted?: boolean; // Flag to track when uploads are starting
   analysisSettings?: {
     analyzeVideo: boolean;
-    mode: string;
   };
+  folder?: string; // Store folder information directly in queue item
 }
 
 export interface VideoSettings {
@@ -69,7 +68,6 @@ export interface VideoSettings {
   brandsList?: string[]; // Add list of brands to protect
   folder?: string; // Add folder information
   analyzeVideo?: boolean; // Add video analysis setting
-  mode?: string; // Add mode setting (dev/sora)
 }
 
 interface VideoQueueContextType {
@@ -135,8 +133,6 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
 
      // Handle uploading multiple generations if they exist
      if (updatedJob.generations && updatedJob.generations.length > 0) {
-        console.log(`Job ${updatedJob.id} completed with ${updatedJob.generations.length} generations`);
-
        // Handle each generation
        const uploadPromises = updatedJob.generations
          .filter(generation => !uploadedGenerations.has(generation.id)) // Only process generations not already uploaded
@@ -144,7 +140,6 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
            // Mark this generation as being processed to prevent duplicate uploads
            uploadedGenerations.add(generation.id);
 
-            console.log(`Uploading generation ${generation.id} (not previously uploaded)`);
            const fileName = generateVideoFilename(generation.prompt || item.prompt, generation.id);
 
            // Define metadata for the uploaded asset
@@ -160,46 +155,28 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
            };
 
            // Pass folder information to the download/upload function if available
-            const folder = item.job?.metadata?.folder || undefined;
+            // Use folder from queue item first, then fall back to job metadata
+            const folder = item.folder || item.job?.metadata?.folder || undefined;
 
            try {
              await downloadThenUploadToGallery(generation.id, fileName, metadata, folder);
-              console.log(`Successfully uploaded generation ${generation.id}`);
 
              // Analyze the video if analysis is enabled for this queue item
              const queueItem = queueItems.find(item => item.job?.id === updatedJob.id);
              const analysisSettings = queueItem?.analysisSettings;
 
-              if (analysisSettings?.analyzeVideo && analysisSettings?.mode === "sora") {
+              if (analysisSettings?.analyzeVideo) {
                try {
-                  console.log(`🔍 Starting video analysis for uploaded generation: ${generation.id}`);
-                  console.log(`📊 Using stored analysis settings:`, analysisSettings);
-
                  // Wait 10 seconds for Azure Blob Storage to propagate the uploaded video
-                  console.log(`⏳ Waiting 10 seconds for video to be available in Azure Blob Storage...`);
                  await new Promise(resolve => setTimeout(resolve, 10000));
 
-                  const analysisResult = await analyzeAndUpdateVideoMetadata(fileName);
-                  console.log(`✅ Video analysis completed for ${generation.id}:`, analysisResult.analysis);
-                  toast.success("Video analysis completed", {
-                    description: "AI analysis has been added to the video metadata",
-                    duration: 3000
-                  });
-                } catch (analysisError) {
-                  console.error(`❌ Video analysis failed for ${generation.id}:`, analysisError);
-                  toast.error("Video analysis failed", {
-                    description: "The video was uploaded but analysis could not be completed",
-                    duration: 5000
-                  });
-                }
-              } else {
-                console.log(`⏭️ Skipping video analysis for ${generation.id}:`, {
-                  analyzeVideo: analysisSettings?.analyzeVideo,
-                  mode: analysisSettings?.mode,
-                  reason: !analysisSettings?.analyzeVideo ? 'Analysis disabled' :
-                    analysisSettings?.mode !== 'sora' ? 'Mode not sora' : 'Unknown'
-                });
+                  await analyzeAndUpdateVideoMetadata(fileName);
+                  // Don't show individual analysis toasts - we'll show a consolidated one later
+                } catch (analysisError) {
+                  console.error(`Video analysis failed for ${generation.id}:`, analysisError);
+                  // Don't show individual analysis error toasts - log the error for debugging
+                }
              }
 
              return true;
@@ -210,7 +187,8 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
          });
 
        if (uploadPromises.length > 0) {
-          toast.info(`Uploading ${uploadPromises.length} video${uploadPromises.length > 1 ? 's' : ''} to gallery...`);
+          // Use a loading toast that transforms into success/error
+          const uploadToastId = toast.loading(`Uploading ${uploadPromises.length} video${uploadPromises.length > 1 ? 's' : ''} to gallery...`);
 
          try {
            // Wait for all uploads to complete
@@ -218,7 +196,17 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
            const successCount = results.filter(Boolean).length;
 
            if (successCount > 0) {
-              toast.success(`${successCount} video${successCount > 1 ? 's' : ''} uploaded to gallery`, {
+              // Check if analysis was enabled for this job
+              const queueItem = queueItems.find(item => item.job?.id === updatedJob.id);
+              const analysisEnabled = queueItem?.analysisSettings?.analyzeVideo;
+
+              const description = analysisEnabled
+                ? `${successCount} video${successCount > 1 ? 's' : ''} uploaded with AI analysis`
+                : `${successCount} video${successCount > 1 ? 's' : ''} ready in your gallery`;
+
+              toast.success(`Videos uploaded successfully`, {
+                id: uploadToastId,
+                description,
                duration: 5000
              });
 
@@ -227,11 +215,15 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
            }
 
            if (successCount < uploadPromises.length) {
-              toast.error(`${uploadPromises.length - successCount} video${uploadPromises.length - successCount > 1 ? 's' : ''} failed to upload`);
+              toast.error(`${uploadPromises.length - successCount} video${uploadPromises.length - successCount > 1 ? 's' : ''} failed to upload`, {
+                id: uploadToastId
+              });
            }
          } catch (uploadError) {
            console.error(`Error handling uploads:`, uploadError);
-            toast.error(`Some videos failed to upload`);
+            toast.error(`Some videos failed to upload`, {
+              id: uploadToastId
+            });
          }
        } else {
          console.log(`All generations for job ${updatedJob.id} were already uploaded`);
@@ -321,9 +313,9 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
      status: "pending",
      createdAt: new Date(),
      analysisSettings: settings ? {
-        analyzeVideo: settings.analyzeVideo || false,
-        mode: settings.mode || "dev"
+        analyzeVideo: settings.analyzeVideo || false
      } : undefined,
+      folder: settings?.folder, // Store folder directly in queue item
    };
 
    // Update queue with pending item
@@ -354,14 +346,9 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
    if (settings.analyzeVideo !== undefined) {
      jobMetadata.analyzeVideo = settings.analyzeVideo.toString();
    }
-    if (settings.mode) {
-      jobMetadata.mode = settings.mode;
-    }
 
    // Check if we should use the unified endpoint with analysis
-    if (settings.analyzeVideo && settings.mode === "sora") {
-      console.log("🚀 Using unified video generation with analysis endpoint");
-
+    if (settings.analyzeVideo) {
      // Use the unified endpoint that handles generation + analysis atomically
      const unifiedRequest: VideoGenerationWithAnalysisRequest = {
        ...apiRequest,
@@ -373,13 +360,8 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
      const unifiedResponse = await createVideoGenerationWithAnalysis(unifiedRequest);
      const job = unifiedResponse.job;
 
-      // If analysis was completed, show success message
-      if (unifiedResponse.analysis_results && unifiedResponse.analysis_results.length > 0) {
-        toast.success("Video generation and analysis completed!", {
-          description: `Generated ${job.generations?.length || 0} videos with AI analysis`,
-          duration: 5000
-        });
-      }
+      // Don't show immediate success toast for unified endpoint
+      // The regular polling mechanism will handle the final success notification
 
      // Update the queue item with the completed job
      setQueueItems(prev =>
@@ -391,7 +373,8 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
              job,
              status: "completed",
              progress: 100,
-              uploadComplete: true // Mark as complete since unified endpoint handles everything
+              uploadComplete: true, // Mark as complete since unified endpoint handles everything
+              folder: item.folder // Preserve folder information
            }
            : item
        )
@@ -412,7 +395,7 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
      setQueueItems(prev =>
        prev.map(item =>
          item.id === tempId
-            ? { ...item, id: job.id, job }
+            ? { ...item, id: job.id, job, folder: item.folder } // Preserve folder information
            : item
        )
      );
 
      return job.id;
    }
  } else {
-    // Use traditional endpoint for non-analysis jobs or dev mode
+    // Use traditional endpoint for non-analysis jobs
    const job = await createVideoGenerationJob({
      ...apiRequest,
      metadata: jobMetadata
@@ -430,7 +413,7 @@ export function VideoQueueProvider({ children }: { children: React.ReactNode })
    setQueueItems(prev =>
      prev.map(item =>
        item.id === tempId
-          ? { ...item, id: job.id, job }
+          ? { ...item, id: job.id, job, folder: item.folder } // Preserve folder information
          : item
      )
    );