Merged
Changes from 3 commits
3 changes: 2 additions & 1 deletion Backend/.env-example
@@ -5,4 +5,5 @@ port=5432
dbname=postgres
GROQ_API_KEY=
SUPABASE_URL=
SUPABASE_KEY=
SUPABASE_KEY=
GEMINI_API_KEY=
15 changes: 9 additions & 6 deletions Backend/app/db/seed.py
@@ -1,6 +1,6 @@
from datetime import datetime, timezone
from db.db import AsyncSessionLocal
from models.models import User
from datetime import datetime
from app.db.db import AsyncSessionLocal
from app.models.models import User


async def seed_db():
@@ -12,6 +12,8 @@ async def seed_db():
"password": "password123",
"role": "creator",
"bio": "Lifestyle and travel content creator",
"profile_image": None,
"created_at": datetime.utcnow()
},
{
"id": "6dbfcdd5-795f-49c1-8f7a-a5538b8c6f6f",
@@ -20,6 +22,8 @@ async def seed_db():
"password": "password123",
"role": "brand",
"bio": "Sustainable fashion brand looking for influencers",
"profile_image": None,
"created_at": datetime.utcnow()
},
]

@@ -40,11 +44,10 @@ async def seed_db():
                id=user_data["id"],
                username=user_data["username"],
                email=user_data["email"],
                password_hash=user_data[
                    "password"
                ],  # Using plain password directly
                role=user_data["role"],
                profile_image=user_data["profile_image"],
                bio=user_data["bio"],
                created_at=user_data["created_at"]
            )
            session.add(user)
            print(f"Created user: {user_data['email']}")
14 changes: 9 additions & 5 deletions Backend/app/main.py
@@ -1,15 +1,17 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from db.db import engine
from db.seed import seed_db
from models import models, chat
from routes.post import router as post_router
from routes.chat import router as chat_router
from .db.db import engine
from .db.seed import seed_db
from .models import models, chat
from .routes.post import router as post_router
from .routes.chat import router as chat_router
from .routes.match import router as match_router
from sqlalchemy.exc import SQLAlchemyError
import logging
import os
from dotenv import load_dotenv
from contextlib import asynccontextmanager
from app.routes import ai

# Load environment variables
load_dotenv()
@@ -51,6 +53,8 @@ async def lifespan(app: FastAPI):
# Include the routes
app.include_router(post_router)
app.include_router(chat_router)
app.include_router(match_router)
app.include_router(ai.router)


@app.get("/")
2 changes: 1 addition & 1 deletion Backend/app/models/chat.py
@@ -1,7 +1,7 @@
from sqlalchemy import Column, String, ForeignKey, DateTime, Enum, UniqueConstraint
from sqlalchemy.orm import relationship
from datetime import datetime, timezone
from db.db import Base
from app.db.db import Base
import uuid
import enum

13 changes: 5 additions & 8 deletions Backend/app/models/models.py
@@ -9,10 +9,11 @@
DECIMAL,
DateTime,
Boolean,
TIMESTAMP,
)
from sqlalchemy.orm import relationship
from datetime import datetime, timezone
from db.db import Base
from datetime import datetime
from app.db.db import Base
import uuid


@@ -31,14 +32,10 @@ class User(Base):
    role = Column(String, nullable=False)  # 'creator' or 'brand'
    profile_image = Column(Text, nullable=True)
    bio = Column(Text, nullable=True)
    created_at = Column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    created_at = Column(TIMESTAMP, default=datetime.utcnow)

    is_online = Column(Boolean, default=False)  # ✅ Track if user is online
    last_seen = Column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    last_seen = Column(TIMESTAMP, default=datetime.utcnow)
Comment on lines +35 to +38
Contributor

⚠️ Potential issue

Fix timezone inconsistency in timestamp handling.

The User model now uses timezone-naive datetime.utcnow() with TIMESTAMP columns, while other models in the same file still use timezone-aware datetime.now(timezone.utc) with DateTime(timezone=True). This inconsistency can cause issues when comparing timestamps across different models or when the database expects consistent timezone handling.

Consider one of these solutions:

Option 1 (Recommended): Use timezone-aware timestamps consistently

-from datetime import datetime
+from datetime import datetime, timezone
-    created_at = Column(TIMESTAMP, default=datetime.utcnow)
+    created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
-    last_seen = Column(TIMESTAMP, default=datetime.utcnow)
+    last_seen = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))

Option 2: Update all models to use TIMESTAMP consistently

# Update all other DateTime(timezone=True) columns to use TIMESTAMP with datetime.utcnow
📝 Committable suggestion


Suggested change
# at the top of the file, update the import
-from datetime import datetime
+from datetime import datetime, timezone

# in your User model, replace the naive TIMESTAMP fields
-created_at = Column(TIMESTAMP, default=datetime.utcnow)
+created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
 is_online = Column(Boolean, default=False)  # ✅ Track if user is online
-last_seen = Column(TIMESTAMP, default=datetime.utcnow)
+last_seen = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
🤖 Prompt for AI Agents
In Backend/app/models/models.py around lines 35 to 38, the timestamp columns use
timezone-naive datetime.utcnow() with TIMESTAMP, causing inconsistency with
other models that use timezone-aware datetime.now(timezone.utc) and
DateTime(timezone=True). To fix this, update the created_at and last_seen
columns to use DateTime(timezone=True) and set their default to a timezone-aware
function like datetime.now(timezone.utc) to ensure consistent timezone handling
across all models.
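
As a quick illustration of the failure mode behind this comment (a minimal sketch, not part of the PR): once some columns hold naive values and others aware ones, comparing them raises at runtime.

from datetime import datetime, timezone

naive = datetime.utcnow()           # what the TIMESTAMP columns now default to
aware = datetime.now(timezone.utc)  # what the DateTime(timezone=True) columns hold

try:
    naive < aware
except TypeError as exc:
    print(exc)  # "can't compare offset-naive and offset-aware datetimes"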


    audience = relationship("AudienceInsights", back_populates="user", uselist=False)
    sponsorships = relationship("Sponsorship", back_populates="brand")
66 changes: 66 additions & 0 deletions Backend/app/routes/ai.py
@@ -0,0 +1,66 @@
# FastAPI router for AI-powered endpoints, including trending niches
from fastapi import APIRouter
from datetime import date
import os
import requests
import json
from supabase import create_client, Client

# Initialize router
router = APIRouter()

# Load environment variables for Supabase and Gemini
SUPABASE_URL = os.environ.get("SUPABASE_URL")
SUPABASE_KEY = os.environ.get("SUPABASE_KEY")
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)

def fetch_from_gemini():


    prompt = (
        "List the top 6 trending content niches for creators and brands this week. For each, provide: name (the niche), insight (a short qualitative reason why it's trending), and global_activity (a number from 1 to 5, where 5 means very high global activity in this category, and 1 means low).Return as a JSON array of objects with keys: name, insight, global_activity."
Member

  1. How will Gemini know this week's trends? Is this grounded in Google Trends data?
  2. Why are we making raw API calls? Let's use the SDK.

    )
url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-lite:generateContent?key={GEMINI_API_KEY}"
Contributor

🛠️ Refactor suggestion

Fix formatting and add proper error handling.

The function has formatting issues and needs better error handling for API responses.

+
 def fetch_from_gemini():
-
-
+    """Fetch trending niches from Gemini API."""
+    if not GEMINI_API_KEY:
+        raise ValueError("GEMINI_API_KEY environment variable is required")
+    
     prompt = (
         "List the top 6 trending content niches for creators and brands this week. For each, provide: name (the niche), insight (a short qualitative reason why it's trending), and global_activity (a number from 1 to 5, where 5 means very high global activity in this category, and 1 means low).Return as a JSON array of objects with keys: name, insight, global_activity."
     )
📝 Committable suggestion


Suggested change
# (ensure there are two blank lines above any top-level function)
def fetch_from_gemini():
    """Fetch trending niches from Gemini API."""
    if not GEMINI_API_KEY:
        raise ValueError("GEMINI_API_KEY environment variable is required")
    prompt = (
        "List the top 6 trending content niches for creators and brands this week. "
        "For each, provide: name (the niche), insight (a short qualitative reason "
        "why it's trending), and global_activity (a number from 1 to 5, where 5 means "
        "very high global activity in this category, and 1 means low).Return as a JSON "
        "array of objects with keys: name, insight, global_activity."
    )
    url = f"https://generativelanguage.googleapis.com/v1beta/models/" \
          f"gemini-2.0-flash-lite:generateContent?key={GEMINI_API_KEY}"
🧰 Tools
🪛 Flake8 (7.2.0)
[error] 18-18: expected 2 blank lines, found 1 (E302)
[error] 21-21: too many blank lines (2) (E303)

🤖 Prompt for AI Agents
In Backend/app/routes/ai.py around lines 18 to 24, the function
fetch_from_gemini has formatting issues and lacks error handling for the API
call. Fix the formatting by properly indenting the code and separating
statements clearly. Add try-except blocks to catch exceptions during the API
request and handle non-success HTTP responses by checking the status code and
raising or logging appropriate errors.
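
To make the prompt's advice concrete, one possible shape for the guarded request (a sketch; the helper name and 30-second timeout are assumptions, not from the PR):

import requests


def post_with_handling(url: str, payload: dict) -> dict:
    """POST the payload, surfacing any transport failure as a single labelled error."""
    try:
        resp = requests.post(url, json=payload, timeout=30)
        resp.raise_for_status()  # turn non-2xx responses into exceptions
        return resp.json()
    except requests.RequestException as exc:
        raise RuntimeError(f"Gemini request failed: {exc}") from exc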

    resp = requests.post(url, json={"contents": [{"parts": [{"text": prompt}]}]})
    resp.raise_for_status()
    print("Gemini raw response:", resp.text)
    data = resp.json()
    print("Gemini parsed JSON:", data)
    text = data['candidates'][0]['content']['parts'][0]['text']
Comment on lines +43 to +46
Contributor

⚠️ Potential issue

Remove sensitive data from debug logs.

Printing raw API responses could expose sensitive information in logs. Consider using proper logging levels and sanitizing output.

-    print("Gemini raw response:", resp.text)
+    # Log success without exposing sensitive data
+    print("Gemini API request successful")
     data = resp.json()
-    print("Gemini parsed JSON:", data)
     text = data['candidates'][0]['content']['parts'][0]['text']
-    print("Gemini text to parse as JSON:", text)

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In Backend/app/routes/ai.py around lines 27 to 30, the code prints the raw API
response and parsed JSON directly, which may expose sensitive data in logs.
Replace these print statements with logging calls at an appropriate log level
(e.g., debug) and sanitize or redact any sensitive information before logging.
Ensure that sensitive fields are not included in the logs to protect data
privacy.
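
A minimal sketch of the logging pattern the comment asks for (logger setup is illustrative, not from the PR):

import logging

logger = logging.getLogger(__name__)


def log_gemini_response(raw_text: str) -> None:
    # Record that a response arrived and how large it was, without echoing the body;
    # debug-level detail stays out of production logs under the default INFO level.
    logger.debug("Gemini response received (%d bytes)", len(raw_text))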

print("Gemini text to parse as JSON:", text)
# Remove Markdown code block if present
if text.strip().startswith('```'):
text = text.strip().split('\n', 1)[1] # Remove the first line (```json)
text = text.rsplit('```', 1)[0] # Remove the last ```
text = text.strip()
return json.loads(text)
Comment on lines +46 to +53
Contributor

🛠️ Refactor suggestion

Unprotected JSON parse may explode on malformed output

If Gemini returns non-JSON or extra prose, json.loads will raise and the entire request falls into the broad except block. Wrap parsing in a dedicated try/except ValueError and surface a 502/500 with context instead of silent fallback.

-    return json.loads(text)
+    try:
+        return json.loads(text)
+    except ValueError as exc:
+        logger.warning("Gemini returned non-JSON payload: %s", text[:120])
+        raise RuntimeError("Gemini payload parsing failed") from exc

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In Backend/app/routes/ai.py around lines 35 to 42, the call to json.loads is
unprotected and will raise an exception if the text is not valid JSON, causing
the entire request to fall into a broad except block. Wrap the json.loads call
in a try/except block that catches ValueError, and in the except block, return
or raise a 502 or 500 error with a clear message indicating JSON parsing failed,
so the error is surfaced with context instead of silently falling back.
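
Putting the fence-stripping and the guarded parse together, a possible helper (name hypothetical; mirrors the PR's stripping logic):

import json


def parse_gemini_json(text: str) -> list:
    """Strip an optional Markdown code fence, then parse, failing loudly with context."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.split("\n", 1)[1].rsplit("```", 1)[0].strip()
    try:
        return json.loads(cleaned)
    except ValueError as exc:
        raise RuntimeError(f"Gemini payload parsing failed: {cleaned[:120]!r}") from exc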


@router.get("/api/trending-niches")
def trending_niches():
"""
API endpoint to get trending niches for the current day.
- If today's data exists in Supabase, return it.
- Otherwise, fetch from Gemini, store in Supabase, and return the new data.
- If Gemini fails, fallback to the most recent data available.
"""
today = str(date.today())
# Check if today's data exists in Supabase
result = supabase.table("trending_niches").select("*").eq("fetched_at", today).execute()
if not result.data:
# Fetch from Gemini and store
try:
niches = fetch_from_gemini()
for niche in niches:
supabase.table("trending_niches").insert({
"name": niche["name"],
"insight": niche["insight"],
"global_activity": int(niche["global_activity"]),
"fetched_at": today
}).execute()
result = supabase.table("trending_niches").select("*").eq("fetched_at", today).execute()
except Exception as e:
print("Gemini fetch failed:", e)
# fallback: serve most recent data
result = supabase.table("trending_niches").select("*").order("fetched_at", desc=True).limit(6).execute()
return result.data
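
On the reviewer's second question above (SDKs over raw HTTP), the same call through the google-generativeai client would look roughly like this (a sketch; assumes that package as a new dependency and reuses the PR's model name, prompt, and GEMINI_API_KEY):

import google.generativeai as genai

genai.configure(api_key=GEMINI_API_KEY)


def fetch_from_gemini_sdk(prompt: str) -> str:
    # The SDK assembles the generateContent request and raises on transport errors,
    # replacing the hand-rolled URL and the requests.post call above.
    model = genai.GenerativeModel("gemini-2.0-flash-lite")
    response = model.generate_content(prompt)
    return response.text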
7 changes: 7 additions & 0 deletions Backend/app/routes/auth.py
@@ -0,0 +1,7 @@
from fastapi import APIRouter

router = APIRouter()

@router.get("/auth/ping")
def ping():
return {"message": "Auth route is working!"}
8 changes: 4 additions & 4 deletions Backend/app/routes/chat.py
@@ -7,12 +7,12 @@
HTTPException,
)
from sqlalchemy.ext.asyncio import AsyncSession
from db.db import get_db
from services.chat_services import chat_service
from ..db.db import get_db
from ..services.chat_services import chat_service
from redis.asyncio import Redis
from services.redis_client import get_redis
from ..services.redis_client import get_redis
import asyncio
from services.chat_pubsub import listen_to_channel
from ..services.chat_pubsub import listen_to_channel
Comment on lines +10 to +15
Contributor

💡 Verification agent

🧩 Analysis chain

Verify the relative import structure to avoid import resolution issues.

The change to relative imports may cause issues with the package structure. Static analysis tools are flagging these as "relative imports beyond top-level package."

Please verify the current package structure and ensure these relative imports work correctly in your deployment environment:

Consider reverting to absolute imports if the relative imports cause issues in your deployment environment:

-from ..db.db import get_db
-from ..services.chat_services import chat_service
-from ..services.redis_client import get_redis
-from ..services.chat_pubsub import listen_to_channel
+from app.db.db import get_db
+from app.services.chat_services import chat_service
+from app.services.redis_client import get_redis
+from app.services.chat_pubsub import listen_to_channel

🏁 Script executed:

#!/bin/bash
# Description: Check the package structure and verify import paths

# Check the directory structure
echo "=== Backend directory structure ==="
find Backend -type f -name "*.py" | head -20

echo -e "\n=== Checking if the imported modules exist ==="
# Check if the imported modules exist at the expected paths
ls -la Backend/app/db/db.py
ls -la Backend/app/services/chat_services.py
ls -la Backend/app/services/redis_client.py
ls -la Backend/app/services/chat_pubsub.py

echo -e "\n=== Checking for __init__.py files ==="
# Check for __init__.py files that make directories Python packages
find Backend -name "__init__.py" -type f

Length of output: 1409


Ensure package initialization or switch to absolute imports to resolve E0402

Verified that all referenced modules exist under Backend/app, but no __init__.py files are present—so static analysis flags “relative imports beyond top-level package.” Choose one of the following fixes:

• Add empty __init__.py in each folder to turn them into proper packages:

  • Backend/app/
  • Backend/app/db/
  • Backend/app/routes/
  • Backend/app/services/

• Or update imports in Backend/app/routes/chat.py to use the app package root:

-from ..db.db import get_db
-from ..services.chat_services import chat_service
-from ..services.redis_client import get_redis
-from ..services.chat_pubsub import listen_to_channel
+from app.db.db import get_db
+from app.services.chat_services import chat_service
+from app.services.redis_client import get_redis
+from app.services.chat_pubsub import listen_to_channel
📝 Committable suggestion


Suggested change
-from ..db.db import get_db
-from ..services.chat_services import chat_service
+from app.db.db import get_db
+from app.services.chat_services import chat_service
 from redis.asyncio import Redis
-from ..services.redis_client import get_redis
+from app.services.redis_client import get_redis
 import asyncio
-from ..services.chat_pubsub import listen_to_channel
+from app.services.chat_pubsub import listen_to_channel
🧰 Tools
🪛 Pylint (3.3.7)
[error] 10-10: Attempted relative import beyond top-level package (E0402)
[error] 11-11: Attempted relative import beyond top-level package (E0402)
[error] 13-13: Attempted relative import beyond top-level package (E0402)
[error] 15-15: Attempted relative import beyond top-level package (E0402)

🤖 Prompt for AI Agents
In Backend/app/routes/chat.py around lines 10 to 15, the relative imports cause
E0402 errors because the directories lack __init__.py files. Fix this by adding
empty __init__.py files in Backend/app/, Backend/app/db/, Backend/app/routes/,
and Backend/app/services/ to make them proper packages, or alternatively, change
the imports to use absolute imports starting from the app package root.
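
For reference, the first option maps onto the layout below, based on the files the verification script listed (a sketch; only the __init__.py entries are new):

Backend/app/
├── __init__.py
├── main.py
├── db/
│   ├── __init__.py
│   └── db.py
├── routes/
│   ├── __init__.py
│   └── chat.py
└── services/
    ├── __init__.py
    ├── chat_services.py
    ├── chat_pubsub.py
    └── redis_client.py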


router = APIRouter(prefix="/chat", tags=["Chat"])

29 changes: 29 additions & 0 deletions Backend/app/routes/match.py
@@ -0,0 +1,29 @@
from fastapi import APIRouter, HTTPException
from supabase import create_client, Client
import os
from dotenv import load_dotenv
from ..services.db_service import match_creators_for_brand, match_brands_for_creator

# Load environment variables
load_dotenv()
url: str = os.getenv("SUPABASE_URL")
key: str = os.getenv("SUPABASE_KEY")
supabase: Client = create_client(url, key)

router = APIRouter(prefix="/match", tags=["Matching"])

@router.get("/creators-for-brand/{sponsorship_id}")
def get_creators_for_brand(sponsorship_id: str):
matches = match_creators_for_brand(sponsorship_id)
if not matches:
raise HTTPException(status_code=404, detail="No matching creators found.")
return {"matches": matches}

@router.get("/brands-for-creator/{creator_id}")
def get_brands_for_creator(creator_id: str):
matches = match_brands_for_creator(creator_id)
if not matches:
raise HTTPException(status_code=404, detail="No matching brand campaigns found.")
return {"matches": matches}

# Placeholder for endpoints, logic to be added next
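
For manual testing, either route can be exercised against a local dev server (a sketch; the base URL and id below are placeholders):

import requests

BASE_URL = "http://localhost:8000"  # assumed local dev server
sponsorship_id = "00000000-0000-0000-0000-000000000000"  # placeholder UUID

resp = requests.get(f"{BASE_URL}/match/creators-for-brand/{sponsorship_id}")
print(resp.status_code, resp.json())  # 404 with a detail message when nothing matches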
6 changes: 3 additions & 3 deletions Backend/app/routes/post.py
@@ -1,12 +1,12 @@
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from db.db import AsyncSessionLocal
from models.models import (
from ..db.db import AsyncSessionLocal
from ..models.models import (
User, AudienceInsights, Sponsorship, UserPost,
SponsorshipApplication, SponsorshipPayment, Collaboration
)
from schemas.schema import (
from ..schemas.schema import (
UserCreate, AudienceInsightsCreate, SponsorshipCreate, UserPostCreate,
SponsorshipApplicationCreate, SponsorshipPaymentCreate, CollaborationCreate
)
Comment on lines +4 to 12
Contributor

⚠️ Potential issue

Remove unused imports to clean up the codebase.

The static analysis tools correctly identified that these imports are unused. This file uses Supabase client directly for database operations, making the SQLAlchemy imports (AsyncSessionLocal, model classes, and schema classes) unnecessary.

Remove the unused imports:

-from ..db.db import AsyncSessionLocal
-from ..models.models import (
-    User, AudienceInsights, Sponsorship, UserPost,
-    SponsorshipApplication, SponsorshipPayment, Collaboration
-)
-from ..schemas.schema import (
-    UserCreate, AudienceInsightsCreate, SponsorshipCreate, UserPostCreate,
-    SponsorshipApplicationCreate, SponsorshipPaymentCreate, CollaborationCreate
-)
📝 Committable suggestion


Suggested change
-from ..db.db import AsyncSessionLocal
-from ..models.models import (
-    User, AudienceInsights, Sponsorship, UserPost,
-    SponsorshipApplication, SponsorshipPayment, Collaboration
-)
-from ..schemas.schema import (
-    UserCreate, AudienceInsightsCreate, SponsorshipCreate, UserPostCreate,
-    SponsorshipApplicationCreate, SponsorshipPaymentCreate, CollaborationCreate
-)
🧰 Tools
🪛 Ruff (0.11.9)
4-4: ..db.db.AsyncSessionLocal imported but unused — remove unused import (F401)
6-6: ..models.models.User imported but unused (F401)
6-6: ..models.models.AudienceInsights imported but unused (F401)
6-6: ..models.models.Sponsorship imported but unused (F401)
6-6: ..models.models.UserPost imported but unused (F401)
7-7: ..models.models.SponsorshipApplication imported but unused (F401)
7-7: ..models.models.SponsorshipPayment imported but unused (F401)
7-7: ..models.models.Collaboration imported but unused (F401)

🪛 Flake8 (7.2.0)
[error] 4-4: '..db.db.AsyncSessionLocal' imported but unused (F401)
[error] 5-5: '..models.models.User' imported but unused (F401)
[error] 5-5: '..models.models.AudienceInsights' imported but unused (F401)
[error] 5-5: '..models.models.Sponsorship' imported but unused (F401)
[error] 5-5: '..models.models.UserPost' imported but unused (F401)
[error] 5-5: '..models.models.SponsorshipApplication' imported but unused (F401)
[error] 5-5: '..models.models.SponsorshipPayment' imported but unused (F401)
[error] 5-5: '..models.models.Collaboration' imported but unused (F401)

🪛 Pylint (3.3.7)
[error] 4-4: Attempted relative import beyond top-level package (E0402)
[error] 5-8: Attempted relative import beyond top-level package (E0402)
[error] 9-12: Attempted relative import beyond top-level package (E0402)

🤖 Prompt for AI Agents
In Backend/app/routes/post.py around lines 4 to 12, remove all imports related
to AsyncSessionLocal, model classes (User, AudienceInsights, Sponsorship,
UserPost, SponsorshipApplication, SponsorshipPayment, Collaboration), and schema
classes (UserCreate, AudienceInsightsCreate, SponsorshipCreate, UserPostCreate,
SponsorshipApplicationCreate, SponsorshipPaymentCreate, CollaborationCreate)
since they are not used in this file. This cleanup will keep the codebase tidy
and avoid unnecessary dependencies.

1 change: 0 additions & 1 deletion Backend/app/schemas/schema.py
@@ -5,7 +5,6 @@
class UserCreate(BaseModel):
    username: str
    email: str
    password_hash: str
    role: str
    profile_image: Optional[str] = None
    bio: Optional[str] = None
4 changes: 2 additions & 2 deletions Backend/app/services/chat_services.py
@@ -2,8 +2,8 @@
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import select
from datetime import datetime, timezone
from models.models import User
from models.chat import ChatList, ChatMessage, MessageStatus
from app.models.models import User
from app.models.chat import ChatList, ChatMessage, MessageStatus
from typing import Dict
from redis.asyncio import Redis
import logging
85 changes: 85 additions & 0 deletions Backend/app/services/db_service.py
@@ -0,0 +1,85 @@
from supabase import create_client, Client
import os
from dotenv import load_dotenv
from typing import List, Dict, Any

# Load environment variables
load_dotenv()
url: str = os.getenv("SUPABASE_URL")
key: str = os.getenv("SUPABASE_KEY")
supabase: Client = create_client(url, key)
Comment on lines +8 to +10
Contributor

⚠️ Potential issue

Add validation and error handling for environment variables.

The code directly uses environment variables without validation. If these variables are not set, the Supabase client creation will fail silently.

Apply this diff to add validation:

-url: str = os.getenv("SUPABASE_URL")
-key: str = os.getenv("SUPABASE_KEY")
-supabase: Client = create_client(url, key)
+url: str = os.getenv("SUPABASE_URL")
+key: str = os.getenv("SUPABASE_KEY")
+
+if not url or not key:
+    raise ValueError("SUPABASE_URL and SUPABASE_KEY must be set in environment variables")
+
+supabase: Client = create_client(url, key)
📝 Committable suggestion


Suggested change
 url: str = os.getenv("SUPABASE_URL")
 key: str = os.getenv("SUPABASE_KEY")
+
+if not url or not key:
+    raise ValueError("SUPABASE_URL and SUPABASE_KEY must be set in environment variables")
+
 supabase: Client = create_client(url, key)
🤖 Prompt for AI Agents
In Backend/app/services/db_service.py around lines 8 to 10, the environment
variables SUPABASE_URL and SUPABASE_KEY are used without validation, which can
cause silent failures if they are missing. Add checks to verify that both
variables are set and raise a clear exception or log an error if either is
missing before calling create_client. This ensures the Supabase client is only
created with valid configuration.



def match_creators_for_brand(sponsorship_id: str) -> List[Dict[str, Any]]:
    # Fetch sponsorship details
    sponsorship_resp = supabase.table("sponsorships").select("*").eq("id", sponsorship_id).execute()
    if not sponsorship_resp.data:
        return []
    sponsorship = sponsorship_resp.data[0]

    # Fetch all audience insights (for creators)
    audience_resp = supabase.table("audience_insights").select("*").execute()
    creators = []
    for audience in audience_resp.data:
        # Basic matching logic: audience, engagement, price, etc.
        match_score = 0
        # Audience age group overlap
        if 'required_audience' in sponsorship and 'audience_age_group' in audience:
            required_ages = sponsorship['required_audience'].get('age_group', [])
            creator_ages = audience.get('audience_age_group', {})
            overlap = sum([creator_ages.get(age, 0) for age in required_ages])
            if overlap > 0:
                match_score += 1
        # Audience location overlap
        if 'required_audience' in sponsorship and 'audience_location' in audience:
            required_locs = sponsorship['required_audience'].get('location', [])
            creator_locs = audience.get('audience_location', {})
            overlap = sum([creator_locs.get(loc, 0) for loc in required_locs])
            if overlap > 0:
                match_score += 1
        # Engagement rate
        if audience.get('engagement_rate', 0) >= sponsorship.get('engagement_minimum', 0):
            match_score += 1
        # Price expectation
        if audience.get('price_expectation', 0) <= sponsorship.get('budget', 0):
            match_score += 1
        if match_score >= 2:  # Threshold for a match
            creators.append({"user_id": audience["user_id"], "match_score": match_score, **audience})
    return creators
Comment on lines +13 to +48
Contributor

🛠️ Refactor suggestion

Optimize performance and extract duplicated matching logic.

The function fetches all audience insights which could be a performance bottleneck. Also, the matching logic is duplicated in both functions.

Consider these improvements:

  1. Add pagination or filtering to limit the number of audience insights fetched
  2. Extract the matching logic into a separate function to avoid duplication
  3. Add error handling for database operations

Here's a refactored approach:

+def calculate_match_score(sponsorship: Dict[str, Any], audience: Dict[str, Any]) -> int:
+    """Calculate match score between sponsorship and audience."""
+    match_score = 0
+    
+    # Audience age group overlap
+    if 'required_audience' in sponsorship and 'audience_age_group' in audience:
+        required_ages = sponsorship['required_audience'].get('age_group', [])
+        creator_ages = audience.get('audience_age_group', {})
+        overlap = sum(creator_ages.get(age, 0) for age in required_ages)
+        if overlap > 0:
+            match_score += 1
+    
+    # Audience location overlap
+    if 'required_audience' in sponsorship and 'audience_location' in audience:
+        required_locs = sponsorship['required_audience'].get('location', [])
+        creator_locs = audience.get('audience_location', {})
+        overlap = sum(creator_locs.get(loc, 0) for loc in required_locs)
+        if overlap > 0:
+            match_score += 1
+    
+    # Engagement rate
+    if audience.get('engagement_rate', 0) >= sponsorship.get('engagement_minimum', 0):
+        match_score += 1
+    
+    # Price expectation
+    if audience.get('price_expectation', 0) <= sponsorship.get('budget', 0):
+        match_score += 1
+    
+    return match_score

 def match_creators_for_brand(sponsorship_id: str) -> List[Dict[str, Any]]:
-    # Fetch sponsorship details
-    sponsorship_resp = supabase.table("sponsorships").select("*").eq("id", sponsorship_id).execute()
-    if not sponsorship_resp.data:
-        return []
-    sponsorship = sponsorship_resp.data[0]
-
-    # Fetch all audience insights (for creators)
-    audience_resp = supabase.table("audience_insights").select("*").execute()
+    try:
+        # Fetch sponsorship details
+        sponsorship_resp = supabase.table("sponsorships").select("*").eq("id", sponsorship_id).execute()
+        if not sponsorship_resp.data:
+            return []
+        sponsorship = sponsorship_resp.data[0]
+
+        # TODO: Add pagination or filtering based on sponsorship criteria
+        # Fetch all audience insights (for creators)
+        audience_resp = supabase.table("audience_insights").select("*").execute()
+    except Exception as e:
+        print(f"Error fetching data: {e}")
+        return []
+    
     creators = []
     for audience in audience_resp.data:
-        # Basic matching logic: audience, engagement, price, etc.
-        match_score = 0
-        # ... (rest of matching logic)
+        match_score = calculate_match_score(sponsorship, audience)
         if match_score >= 2:  # Threshold for a match
             creators.append({"user_id": audience["user_id"], "match_score": match_score, **audience})
     return creators

Would you like me to implement the complete refactored solution with proper error handling and pagination?

📝 Committable suggestion


Suggested change
def calculate_match_score(sponsorship: Dict[str, Any], audience: Dict[str, Any]) -> int:
    """Calculate match score between sponsorship and audience."""
    match_score = 0
    # Audience age group overlap
    if 'required_audience' in sponsorship and 'audience_age_group' in audience:
        required_ages = sponsorship['required_audience'].get('age_group', [])
        creator_ages = audience.get('audience_age_group', {})
        overlap = sum(creator_ages.get(age, 0) for age in required_ages)
        if overlap > 0:
            match_score += 1
    # Audience location overlap
    if 'required_audience' in sponsorship and 'audience_location' in audience:
        required_locs = sponsorship['required_audience'].get('location', [])
        creator_locs = audience.get('audience_location', {})
        overlap = sum(creator_locs.get(loc, 0) for loc in required_locs)
        if overlap > 0:
            match_score += 1
    # Engagement rate
    if audience.get('engagement_rate', 0) >= sponsorship.get('engagement_minimum', 0):
        match_score += 1
    # Price expectation
    if audience.get('price_expectation', 0) <= sponsorship.get('budget', 0):
        match_score += 1
    return match_score


def match_creators_for_brand(sponsorship_id: str) -> List[Dict[str, Any]]:
    try:
        # Fetch sponsorship details
        sponsorship_resp = (
            supabase.table("sponsorships")
            .select("*")
            .eq("id", sponsorship_id)
            .execute()
        )
        if not sponsorship_resp.data:
            return []
        sponsorship = sponsorship_resp.data[0]

        # TODO: Add pagination or filtering based on sponsorship criteria
        # Fetch all audience insights (for creators)
        audience_resp = supabase.table("audience_insights").select("*").execute()
    except Exception as e:
        print(f"Error fetching data: {e}")
        return []

    creators = []
    for audience in audience_resp.data:
        match_score = calculate_match_score(sponsorship, audience)
        if match_score >= 2:  # Threshold for a match
            creators.append({
                "user_id": audience["user_id"],
                "match_score": match_score,
                **audience
            })
    return creators
🧰 Tools
🪛 Pylint (3.3.7)
[refactor] 30-30: Consider using a generator instead 'sum(creator_ages.get(age, 0) for age in required_ages)' (R1728)
[refactor] 37-37: Consider using a generator instead 'sum(creator_locs.get(loc, 0) for loc in required_locs)' (R1728)

🤖 Prompt for AI Agents
In Backend/app/services/db_service.py from lines 13 to 48, the function
match_creators_for_brand fetches all audience insights without pagination,
causing potential performance issues, and duplicates matching logic. To fix
this, implement pagination or filtering when querying audience insights to limit
data volume, extract the matching logic into a separate helper function to avoid
duplication, and add try-except blocks or error handling around database calls
to gracefully handle failures. This will optimize performance, improve code
maintainability, and increase robustness.



def match_brands_for_creator(creator_id: str) -> List[Dict[str, Any]]:
    # Fetch creator's audience insights
    audience_resp = supabase.table("audience_insights").select("*").eq("user_id", creator_id).execute()
    if not audience_resp.data:
        return []
    audience = audience_resp.data[0]

    # Fetch all sponsorships
    sponsorships_resp = supabase.table("sponsorships").select("*").execute()
    matches = []
    for sponsorship in sponsorships_resp.data:
        match_score = 0
        # Audience age group overlap
        if 'required_audience' in sponsorship and 'audience_age_group' in audience:
            required_ages = sponsorship['required_audience'].get('age_group', [])
            creator_ages = audience.get('audience_age_group', {})
            overlap = sum([creator_ages.get(age, 0) for age in required_ages])
            if overlap > 0:
                match_score += 1
        # Audience location overlap
        if 'required_audience' in sponsorship and 'audience_location' in audience:
            required_locs = sponsorship['required_audience'].get('location', [])
            creator_locs = audience.get('audience_location', {})
            overlap = sum([creator_locs.get(loc, 0) for loc in required_locs])
            if overlap > 0:
                match_score += 1
        # Engagement rate
        if audience.get('engagement_rate', 0) >= sponsorship.get('engagement_minimum', 0):
            match_score += 1
        # Price expectation
        if audience.get('price_expectation', 0) <= sponsorship.get('budget', 0):
            match_score += 1
        if match_score >= 2:  # Threshold for a match
            matches.append({"sponsorship_id": sponsorship["id"], "match_score": match_score, **sponsorship})
    return matches
Comment on lines +51 to +85
Contributor

🛠️ Refactor suggestion

Use the extracted matching logic and optimize performance.

This function has the same issues as match_creators_for_brand - it fetches all sponsorships and duplicates the matching logic.

With the extracted calculate_match_score function from the previous comment, this function can be simplified:

 def match_brands_for_creator(creator_id: str) -> List[Dict[str, Any]]:
-    # Fetch creator's audience insights
-    audience_resp = supabase.table("audience_insights").select("*").eq("user_id", creator_id).execute()
-    if not audience_resp.data:
-        return []
-    audience = audience_resp.data[0]
-
-    # Fetch all sponsorships
-    sponsorships_resp = supabase.table("sponsorships").select("*").execute()
+    try:
+        # Fetch creator's audience insights
+        audience_resp = supabase.table("audience_insights").select("*").eq("user_id", creator_id).execute()
+        if not audience_resp.data:
+            return []
+        audience = audience_resp.data[0]
+
+        # TODO: Add filtering based on creator's criteria (e.g., active sponsorships only)
+        # Fetch all sponsorships
+        sponsorships_resp = supabase.table("sponsorships").select("*").execute()
+    except Exception as e:
+        print(f"Error fetching data: {e}")
+        return []
+    
     matches = []
     for sponsorship in sponsorships_resp.data:
-        match_score = 0
-        # ... (duplicated matching logic)
+        match_score = calculate_match_score(sponsorship, audience)
         if match_score >= 2:  # Threshold for a match
             matches.append({"sponsorship_id": sponsorship["id"], "match_score": match_score, **sponsorship})
     return matches

Performance concern: Fetching all sponsorships could be problematic as the database grows. Consider adding filters like:

  • Active sponsorships only
  • Sponsorships within creator's price range
  • Sponsorships matching creator's categories
📝 Committable suggestion


Suggested change
def match_brands_for_creator(creator_id: str) -> List[Dict[str, Any]]:
    try:
        # Fetch creator's audience insights
        audience_resp = supabase.table("audience_insights").select("*").eq("user_id", creator_id).execute()
        if not audience_resp.data:
            return []
        audience = audience_resp.data[0]

        # TODO: Add filtering based on creator's criteria (e.g., active sponsorships only)
        # Fetch all sponsorships
        sponsorships_resp = supabase.table("sponsorships").select("*").execute()
    except Exception as e:
        print(f"Error fetching data: {e}")
        return []

    matches = []
    for sponsorship in sponsorships_resp.data:
        match_score = calculate_match_score(sponsorship, audience)
        if match_score >= 2:  # Threshold for a match
            matches.append({"sponsorship_id": sponsorship["id"], "match_score": match_score, **sponsorship})
    return matches
🧰 Tools
🪛 Pylint (3.3.7)
[refactor] 67-67: Consider using a generator instead 'sum(creator_ages.get(age, 0) for age in required_ages)' (R1728)
[refactor] 74-74: Consider using a generator instead 'sum(creator_locs.get(loc, 0) for loc in required_locs)' (R1728)

🤖 Prompt for AI Agents
In Backend/app/services/db_service.py from lines 51 to 85, the
match_brands_for_creator function duplicates matching logic and fetches all
sponsorships without filtering, which impacts performance. Refactor this
function to use the existing calculate_match_score function for matching logic
to avoid duplication. Additionally, optimize database queries by adding filters
to fetch only active sponsorships, those within the creator's price expectation,
and matching relevant categories to reduce the data volume processed and improve
efficiency.
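
As a worked example of the scoring rubric (toy values, invented for illustration), a creator matching on all four criteria scores 4, comfortably over the >= 2 threshold:

sponsorship = {
    "required_audience": {"age_group": ["18-24"], "location": ["US"]},
    "engagement_minimum": 3.0,
    "budget": 500,
}
audience = {
    "audience_age_group": {"18-24": 40, "25-34": 35},
    "audience_location": {"US": 60, "IN": 20},
    "engagement_rate": 4.2,
    "price_expectation": 450,
}
# age-group overlap: 40 > 0   -> +1
# location overlap:  60 > 0   -> +1
# engagement: 4.2 >= 3.0      -> +1
# price: 450 <= 500           -> +1
# total score 4 >= 2, so this pairing is returned as a match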

8 changes: 4 additions & 4 deletions Backend/sql.txt
@@ -1,8 +1,8 @@
-- Insert into users table
INSERT INTO users (id, username, email, password_hash, role, profile_image, bio, created_at) VALUES
(gen_random_uuid(), 'creator1', 'creator1@example.com', 'hashedpassword1', 'creator', 'image1.jpg', 'Bio of creator1', NOW()),
(gen_random_uuid(), 'brand1', 'brand1@example.com', 'hashedpassword2', 'brand', 'image2.jpg', 'Bio of brand1', NOW()),
(gen_random_uuid(), 'creator2', 'creator2@example.com', 'hashedpassword3', 'creator', 'image3.jpg', 'Bio of creator2', NOW());
INSERT INTO users (id, username, email, role, profile_image, bio, created_at) VALUES
(gen_random_uuid(), 'creator1', 'creator1@example.com', 'creator', 'image1.jpg', 'Bio of creator1', NOW()),
(gen_random_uuid(), 'brand1', 'brand1@example.com', 'brand', 'image2.jpg', 'Bio of brand1', NOW()),
(gen_random_uuid(), 'creator2', 'creator2@example.com', 'creator', 'image3.jpg', 'Bio of creator2', NOW());

-- Insert into audience_insights table
INSERT INTO audience_insights (id, user_id, audience_age_group, audience_location, engagement_rate, average_views, time_of_attention, price_expectation, created_at) VALUES
3 changes: 2 additions & 1 deletion Frontend/env-example
@@ -1,2 +1,3 @@
VITE_SUPABASE_URL=https://your-project.supabase.co
VITE_SUPABASE_ANON_KEY=your-anon-key-here
VITE_SUPABASE_ANON_KEY=your-anon-key-here
VITE_YOUTUBE_API_KEY=your-youtube-api-key-here
Binary file added Frontend/public/brand.png
Binary file added Frontend/public/contnetcreator.png
Binary file added Frontend/public/facebook.png
Binary file added Frontend/public/instagram.png
Binary file added Frontend/public/tiktok.png
Binary file added Frontend/public/youtube.png