Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,3 +6,17 @@
/.idea/vcs.xml
/backend/agents/writer.py
/gpt-marketer/
/frontend/static/outputs/

.env
.venv/

backend/__pycache__/
backend/agents/__pycache__/
backend/agents/models/__pycache__/
backend/agents/models/spam_model.py
backend/agents/models/spam_model.pyc



1 change: 1 addition & 0 deletions .python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3.11
10 changes: 7 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -55,17 +55,21 @@ These agents work together to create a tailored B2B email marketing campaign tha
export TAVILY_API_KEY=<YOUR_TAVILY_API_KEY>
export OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
```
3. Install Requirements
3. Install Requirements (using [uv](https://docs.astral.sh/uv/))
```sh
pip install -r requirements.txt
uv pip install -r requirements.txt
```
Or, to sync with your lockfile and set up the environment:
```sh
uv sync
```
4. Configure Target Audience and Products
```sh
# Add your target companies and product details to the vector database.
```
5. Run the Marketer
```sh
python marketer.py
uv run app.py
```
6. Watch Marketer in Action

Expand Down
Empty file added backend/__init__.py
Empty file.
6 changes: 3 additions & 3 deletions backend/agents/critique.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import sys
from pathlib import Path
from datetime import datetime
from langchain.adapters.openai import convert_openai_messages
from langchain_openai import ChatOpenAI
from langchain_community.adapters.openai import convert_openai_messages
from langchain_anthropic import ChatAnthropic
from .models.spam_model import SpamClassifier
import os

Expand Down Expand Up @@ -63,7 +63,7 @@ def critique(self, article: dict):
}]

lc_messages = convert_openai_messages(prompt)
response = ChatOpenAI(model='gpt-4', max_retries=1, max_tokens=400).invoke(lc_messages).content
response = ChatAnthropic(model='claude-3-5-sonnet-20240620', max_retries=1, api_key=os.getenv("ANTHROPIC_API_KEY")).invoke(lc_messages).content
number_of_revisions = article.get('number_of_revisions')
if response == 'None' or number_of_revisions == "1" or number_of_revisions == 1: # deterministic approach
return {'critique': None}
Expand Down
2 changes: 1 addition & 1 deletion backend/agents/designer.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def run(self, email: dict):
composite_image.save(composite_image_path)
# print(self.output_dir)
# Image URL is saved in email dictionary
email['merged_logos'] = f'{composite_image_path.replace('frontend/static/', '')}'
email['merged_logos'] = f'{composite_image_path.replace("frontend/static/", "")}'
# print(email['merged_logos'])

# Update the HTML with the new image
Expand Down
2 changes: 2 additions & 0 deletions backend/agents/search.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
from tavily import TavilyClient
import os
from dotenv import load_dotenv

load_dotenv()
tavily_client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))


Expand Down
12 changes: 7 additions & 5 deletions backend/agents/writer.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
from datetime import datetime
from langchain.adapters.openai import convert_openai_messages
from langchain_openai import ChatOpenAI
from langchain_community.adapters.openai import convert_openai_messages
from langchain_anthropic import ChatAnthropic
import json5 as json

from dotenv import load_dotenv
import os
load_dotenv()
sample_json = """
{
"subject": subject of the email,
Expand Down Expand Up @@ -47,7 +49,7 @@ def writer(self, email: dict):
"response_format": {"type": "json_object"}
}

response = ChatOpenAI(model='gpt-4-0125-preview', max_retries=1, model_kwargs=optional_params).invoke(
response = ChatAnthropic(model='claude-3-5-sonnet-20240620', max_retries=1, api_key=os.getenv("ANTHROPIC_API_KEY")).invoke(
lc_messages).content
return json.loads(response)

Expand Down Expand Up @@ -78,7 +80,7 @@ def revise(self, email: dict):
"response_format": {"type": "json_object"}
}

response = ChatOpenAI(model='gpt-4-0125-preview', max_retries=1, model_kwargs=optional_params).invoke(
response = ChatAnthropic(model='claude-3-5-sonnet-20240620', max_retries=1, api_key=os.getenv("ANTHROPIC_API_KEY")).invoke(
lc_messages).content
response = json.loads(response)
print(f"For article: {email['title']}")
Expand Down
8 changes: 5 additions & 3 deletions backend/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,9 +64,11 @@ def run(self, data: dict):
# Set up edges
workflow.add_edge('search', 'write')
workflow.add_edge('write', 'critique')
workflow.add_conditional_edges(start_key='critique',
condition=lambda x: "accept" if x['critique'] is None else "revise",
conditional_edge_mapping={"accept": "design", "revise": "write"})
workflow.add_conditional_edges(
source='critique',
path=lambda x: "accept" if x['critique'] is None else "revise",
path_map={"accept": "design", "revise": "write"}
)

# set up start and end nodes
workflow.set_entry_point("search")
Expand Down
Binary file added backend/~$leads_list.xlsx
Binary file not shown.
83 changes: 83 additions & 0 deletions mcp_server/client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
import asyncio
import os
import logging
import warnings
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_anthropic import ChatAnthropic
from mcp import ClientSession
from mcp.client.sse import sse_client
from langchain import hub
from langgraph.prebuilt import create_react_agent

warnings.filterwarnings("ignore", category=ResourceWarning)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load environment variables from .env
load_dotenv()
# Get environment variables for Anthropic and Tavily
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

# Set up the same server parameters as Cursor
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
server_script = os.path.join(project_dir, "mcp_server", "server.py")


company_details = {
"leads": {
"0": {
"name": "Assaf Elovic",
"email": "assaf.elovic@gmail.com",
"title": "CTO",
"company": "Tavily",
}
},
"product_description": """""",
"user_company": "Tavily",
"user_email": "rotem@tavily.com",
"user_first_name": "Rotem",
"user_last_name": "Weiss",
"logo": "https://yyz2.discourse-cdn.com/flex004/user_avatar/community.tavily.com/system/144/107_2.png",
}

llm = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0)


async def main():
    """
    Connect to the local MCP server over SSE, load its tools, and run a
    ReAct agent that asks the server to generate email content for
    ``company_details``.

    The agent's final message (the generated email content) is printed to
    stdout. Assumes the MCP server is already running at
    http://localhost:8000/sse — TODO confirm the port matches server.py.
    """
    # Connect to the MCP server using SSE; sse_client yields a
    # (read_stream, write_stream) pair for the session.
    async with sse_client(url="http://localhost:8000/sse") as (read, write):
        async with ClientSession(read_stream=read, write_stream=write) as session:
            # The MCP handshake must complete before tools can be listed.
            await session.initialize()
            logger.info("Session initialized")

            # Load the server's MCP tools and wrap them as LangChain tools
            # the agent can invoke.
            tools = await load_mcp_tools(session)
            logger.info("Tools loaded")
            logger.info(tools)

            # Create the agent executor (ReAct loop over the loaded tools).
            agent_executor = create_react_agent(llm, tools)

            # Run the agent; company details are serialized into the prompt
            # so the server-side tool receives them as part of the request.
            result = await agent_executor.ainvoke(
                {
                    "messages": [
                        HumanMessage(
                            content="Give the content of the email generated using the following company details: "
                            + str(company_details)
                        )
                    ]
                }
            )
            # The last message in the transcript holds the agent's answer.
            print(result["messages"][-1].content)


if __name__ == "__main__":
    asyncio.run(main())
93 changes: 93 additions & 0 deletions mcp_server/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
# GPT-Marketer MCP Server

This project provides an MCP (Model Context Protocol) server for marketing automation and email generation.
## Configuration

Add the following configuration for cursor:

```json
{
"mcpServers": {
"gpt-marketer-mcp": {
"url": "http://localhost:8000/sse",
"env":{
"PYTHONPATH": "",
"ANTHROPIC_API_KEY": "",
"TAVILY_API_KEY": ""
},
"environment":{
"name": "gpt-marketer-mcp",
"type": "venv",
"path": "/.venv"
}
}
}
}
```

- `url`: The SSE endpoint for the MCP server.
- `env`: Environment variables required for the server to function. Set your API keys here.
- `environment`: Python virtual environment configuration.

---

## How to Use in Cursor

1. **Add the MCP Server**
- Open Cursor and go to the MCP integration settings.
- Add a new MCP server with the following details:
- **Name**: `gpt-marketer-mcp`
- **URL**: `http://localhost:8000/sse`
- **Environment Variables**: Set your `ANTHROPIC_API_KEY` and `TAVILY_API_KEY`.
- **Python Environment**: Point to your virtual environment path (e.g., `/.venv`).

2. **Connect and Use**
- Once added, connect to the server from Cursor.
   - You can now use the tools and capabilities provided by the GPT-Marketer MCP server directly within Cursor.

---

## How to Run Using `client.py`

1. **Install Dependencies**
- Ensure you have Python 3.8+ and a virtual environment set up.
- Install dependencies using [uv](https://docs.astral.sh/uv/):
```bash
uv pip install -r requirements.txt
```
- Or, if you have a `pyproject.toml` file, you can use:
```bash
uv pip install .
```
- For project-based dependency management and environment setup:
```bash
uv sync
```

2. **Set Environment Variables**
- Create a `.env` file in the project root:
```env
ANTHROPIC_API_KEY=your-anthropic-key
TAVILY_API_KEY=your-tavily-key
```

3. **Start the MCP Server**
- Run the server (if not already running):
```bash
uv run mcp_server/server.py
```

4. **Run the Client**
- In a new terminal, run:
```bash
uv run mcp_server/client.py
```
- The client will connect to the MCP server, load available tools, and allow you to generate email content or perform other marketing tasks.

---

## About MCP

Model Context Protocol (MCP) is an open protocol for connecting AI applications to data sources and tools. MCP servers expose tools and resources, while MCP clients (like this project) connect to those servers to enable powerful AI-driven workflows.

For more information, see the [MCP documentation](https://modelcontextprotocol.io/introduction).
39 changes: 39 additions & 0 deletions mcp_server/server.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
import logging
from mcp.server.fastmcp import FastMCP
from dotenv import load_dotenv
from backend.main import MasterAgent
import pandas as pd
import os

load_dotenv()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

mcp = FastMCP("gpt-marketer-mcp")


@mcp.tool()
async def email_content(company_data: dict):
    """
    Generate a marketing email for the given company data and return its HTML.

    Args:
        company_data: Dict describing leads, product, and sender details —
            the same shape MasterAgent.run consumes (see backend/main.py).

    Returns:
        The HTML content of the generated email, or a plain error string if
        generation fails (this tool reports failures as text rather than
        raising, so MCP clients always get a usable response).
    """
    master_agent = MasterAgent()
    try:
        # MasterAgent.run writes the rendered email into its output_dir.
        master_agent.run(company_data)
        # Read the generated HTML back with an explicit encoding instead of
        # the platform default (which would break on non-ASCII content on
        # Windows). Assumes the file is written as UTF-8 — TODO confirm.
        email_path = os.path.join(master_agent.output_dir, "email.html")
        with open(email_path, "r", encoding="utf-8") as f:
            html_content = f.read()
        logger.info(f"Email content generated: {html_content}")

        return html_content
    except Exception:
        # logger.exception records the full traceback, unlike logger.error;
        # keep the original contract of returning an error string.
        logger.exception("Error generating email content")
        return "Error generating email content"


# Serve the registered tools over SSE so MCP clients (Cursor, client.py)
# can connect; FastMCP's SSE transport listens on port 8000 by default
# — TODO confirm against the URL used in client.py.
if __name__ == "__main__":
    mcp.run(transport="sse")
26 changes: 26 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
[project]
name = "gpt-marketer"
version = "0.1.0"
description = "MCP server and agents for AI-driven B2B email marketing campaign generation"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
"black>=25.1.0",
"flask>=3.1.1",
"flask-cors>=6.0.0",
"joblib>=1.5.0",
"json5>=0.12.0",
"langchain>=0.3.25",
"langchain-anthropic>=0.3.13",
"langchain-community>=0.3.24",
"langchain-mcp-adapters>=0.1.1",
"langchain-openai>=0.3.17",
"langgraph>=0.4.5",
"mcp[cli]>=1.9.0",
"openpyxl>=3.1.5",
"pandas>=2.2.3",
"pillow>=11.2.1",
"python-dotenv>=1.1.0",
"scikit-learn>=1.6.1",
"tavily-python>=0.7.2",
]
Loading