diff --git a/.env.template b/.env.template
index ffdc5bb6e2..fd00401294 100644
--- a/.env.template
+++ b/.env.template
@@ -1,5 +1,9 @@
-### OpenAI Setup ###
+# Email Configuration
+SMTP_SERVER=smtp.gmail.com
+SMTP_PORT=587
+SMTP_USERNAME=your-email@gmail.com
+SMTP_PASSWORD=your-app-password
+FROM_EMAIL=noreply@gptengineer.app
-# OPENAI_API_KEY=Your personal OpenAI API key from https://platform.openai.com/account/api-keys
-OPENAI_API_KEY=...
-ANTHROPIC_API_KEY=...
+# Database
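+# SQLite file by default; point DATABASE_URL at a Postgres/MySQL URL to override
+# (gpt_engineer/core/database/models.py falls back to this value).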
+DATABASE_URL=sqlite:///gpt_engineer.db
\ No newline at end of file
diff --git a/frontend/src/components/changeemailform.jsx b/frontend/src/components/changeemailform.jsx
new file mode 100644
index 0000000000..73ae0d5098
--- /dev/null
+++ b/frontend/src/components/changeemailform.jsx
@@ -0,0 +1,97 @@
+import React, { useState } from 'react';
+
+const ChangeEmailForm = ({ userId }) => {
+ const [currentPassword, setCurrentPassword] = useState('');
+ const [newEmail, setNewEmail] = useState('');
+ const [message, setMessage] = useState('');
+ const [loading, setLoading] = useState(false);
+
+ const handleSubmit = async (e) => {
+ e.preventDefault();
+ setLoading(true);
+ setMessage('');
+
+ try {
+ const response = await fetch('/api/user/change-email', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ user_id: userId,
+ new_email: newEmail,
+ current_password: currentPassword
+ }),
+ });
+
+ const data = await response.json();
+
+ if (response.ok) {
+ setMessage(data.message);
+ setNewEmail('');
+ setCurrentPassword('');
+ } else {
+ setMessage(data.error);
+ }
+ } catch (error) {
+ setMessage('Network error. Please try again.');
+ } finally {
+ setLoading(false);
+ }
+ };
+
+  return (
+    <div>
+      <h2>Change Email Address</h2>
+      <form onSubmit={handleSubmit}>
+        <label>
+          New email address
+          <input type="email" value={newEmail} onChange={(e) => setNewEmail(e.target.value)} required />
+        </label>
+        <label>
+          Current password
+          <input type="password" value={currentPassword} onChange={(e) => setCurrentPassword(e.target.value)} required />
+        </label>
+        <button type="submit" disabled={loading}>
+          {loading ? 'Changing...' : 'Change Email'}
+        </button>
+      </form>
+      {message && <p>{message}</p>}
+      <h3>How it works:</h3>
+      <ul>
+        <li>Enter your new email address and current password</li>
+        <li>We'll send a verification link to your new email</li>
+        <li>Click the link to verify and complete the change</li>
+        <li>You'll receive notifications on both old and new emails</li>
+      </ul>
+    </div>
+  );
+};
+
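+// Usage sketch (illustrative): render this on a settings page and pass the
+// signed-in user's id, e.g. <ChangeEmailForm userId={currentUser.id} />.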
+export default ChangeEmailForm;
\ No newline at end of file
diff --git a/gpt_engineer/core/ai.py b/gpt_engineer/core/ai.py
index ae86f63364..cebb7352ae 100644
--- a/gpt_engineer/core/ai.py
+++ b/gpt_engineer/core/ai.py
@@ -1,437 +1,77 @@
-"""
-AI Module
-
-This module provides an AI class that interfaces with language models to perform various tasks such as
-starting a conversation, advancing the conversation, and handling message serialization. It also includes
-backoff strategies for handling rate limit errors from the OpenAI API.
-
-Classes:
- AI: A class that interfaces with language models for conversation management and message serialization.
-
-Functions:
- serialize_messages(messages: List[Message]) -> str
- Serialize a list of messages to a JSON string.
-"""
-
-from __future__ import annotations
-
+from flask import request, jsonify
import json
-import logging
-import os
-
-from pathlib import Path
-from typing import Any, List, Optional, Union
-
-import backoff
-import openai
-import pyperclip
-
-from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-from langchain.chat_models.base import BaseChatModel
-from langchain.schema import (
- AIMessage,
- HumanMessage,
- SystemMessage,
- messages_from_dict,
- messages_to_dict,
-)
-from langchain_anthropic import ChatAnthropic
-from langchain_openai import AzureChatOpenAI, ChatOpenAI
-
-from gpt_engineer.core.token_usage import TokenUsageLog
-
-# Type hint for a chat message
-Message = Union[AIMessage, HumanMessage, SystemMessage]
-
-# Set up logging
-logger = logging.getLogger(__name__)
-
-
-class AI:
- """
- A class that interfaces with language models for conversation management and message serialization.
-
- This class provides methods to start and advance conversations, handle message serialization,
- and implement backoff strategies for rate limit errors when interacting with the OpenAI API.
-
- Attributes
- ----------
- temperature : float
- The temperature setting for the language model.
- azure_endpoint : str
- The endpoint URL for the Azure-hosted language model.
- model_name : str
- The name of the language model to use.
- streaming : bool
- A flag indicating whether to use streaming for the language model.
- llm : BaseChatModel
- The language model instance for conversation management.
- token_usage_log : TokenUsageLog
- A log for tracking token usage during conversations.
-
- Methods
- -------
- start(system: str, user: str, step_name: str) -> List[Message]
- Start the conversation with a system message and a user message.
- next(messages: List[Message], prompt: Optional[str], step_name: str) -> List[Message]
- Advances the conversation by sending message history to LLM and updating with the response.
- backoff_inference(messages: List[Message]) -> Any
- Perform inference using the language model with an exponential backoff strategy.
- serialize_messages(messages: List[Message]) -> str
- Serialize a list of messages to a JSON string.
- deserialize_messages(jsondictstr: str) -> List[Message]
- Deserialize a JSON string to a list of messages.
- _create_chat_model() -> BaseChatModel
- Create a chat model with the specified model name and temperature.
- """
-
- def __init__(
- self,
- model_name="gpt-4-turbo",
- temperature=0.1,
- azure_endpoint=None,
- streaming=True,
- vision=False,
- ):
- """
- Initialize the AI class.
-
- Parameters
- ----------
- model_name : str, optional
- The name of the model to use, by default "gpt-4".
- temperature : float, optional
- The temperature to use for the model, by default 0.1.
- """
- self.temperature = temperature
- self.azure_endpoint = azure_endpoint
- self.model_name = model_name
- self.streaming = streaming
- self.vision = (
- ("vision-preview" in model_name)
- or ("gpt-4-turbo" in model_name and "preview" not in model_name)
- or ("claude" in model_name)
- )
- self.llm = self._create_chat_model()
- self.token_usage_log = TokenUsageLog(model_name)
-
- logger.debug(f"Using model {self.model_name}")
-
- def start(self, system: str, user: Any, *, step_name: str) -> List[Message]:
- """
- Start the conversation with a system message and a user message.
-
- Parameters
- ----------
- system : str
- The content of the system message.
- user : str
- The content of the user message.
- step_name : str
- The name of the step.
-
- Returns
- -------
- List[Message]
- The list of messages in the conversation.
- """
-
- messages: List[Message] = [
- SystemMessage(content=system),
- HumanMessage(content=user),
- ]
- return self.next(messages, step_name=step_name)
-
- def _extract_content(self, content):
- """
- Extracts text content from a message, supporting both string and list types.
- Parameters
- ----------
- content : Union[str, List[dict]]
- The content of a message, which could be a string or a list.
- Returns
- -------
- str
- The extracted text content.
- """
- if isinstance(content, str):
- return content
- elif isinstance(content, list) and content and "text" in content[0]:
- # Assuming the structure of list content is [{'type': 'text', 'text': 'Some text'}, ...]
- return content[0]["text"]
- else:
- return ""
-
- def _collapse_text_messages(self, messages: List[Message]):
- """
- Combine consecutive messages of the same type into a single message, where if the message content
- is a list type, the first text element's content is taken. This method keeps `combined_content` as a string.
-
- This method iterates through the list of messages, combining consecutive messages of the same type
- by joining their content with a newline character. If the content is a list, it extracts text from the first
- text element's content. This reduces the number of messages and simplifies the conversation for processing.
- Parameters
- ----------
- messages : List[Message]
- The list of messages to collapse.
-
- Returns
- -------
- List[Message]
- The list of messages after collapsing consecutive messages of the same type.
- """
- collapsed_messages = []
- if not messages:
- return collapsed_messages
-
- previous_message = messages[0]
- combined_content = self._extract_content(previous_message.content)
-
- for current_message in messages[1:]:
- if current_message.type == previous_message.type:
- combined_content += "\n\n" + self._extract_content(
- current_message.content
- )
+class UserAPI:
+ def __init__(self, memory):
+ self.memory = memory
+
+ def handle_email_change_request(self):
+ data = request.get_json()
+ user_id = data.get('user_id')
+ new_email = data.get('new_email')
+ current_password = data.get('current_password') # If you have auth
+
+ try:
+ user = self.memory.get_user(user_id)
+ if not user:
+ return jsonify({"error": "User not found"}), 404
+
+ # Verify current password if authentication exists
+ if hasattr(user, 'verify_password'):
+ if not user.verify_password(current_password):
+ return jsonify({"error": "Invalid password"}), 401
+
+ user.request_email_change(new_email)
+
+ # Save user state (you'll need to implement this)
+ self.memory.save_user(user)
+
+ # Send verification email (implement this)
+ self._send_verification_email(user, new_email)
+
+ return jsonify({
+ "message": "Verification email sent to new email address"
+ }), 200
+
+ except ValueError as e:
+ return jsonify({"error": str(e)}), 400
+ except Exception as e:
+ return jsonify({"error": "Internal server error"}), 500
+
+ def handle_email_verification(self, token: str):
+ try:
+ user = self.memory.get_user_by_token(token)
+ if not user:
+ return jsonify({"error": "Invalid or expired token"}), 400
+
+ if user.verify_email_change(token):
+ self.memory.save_user(user)
+ return jsonify({
+ "message": "Email updated successfully"
+ }), 200
else:
- collapsed_messages.append(
- previous_message.__class__(content=combined_content)
- )
- previous_message = current_message
- combined_content = self._extract_content(current_message.content)
-
- collapsed_messages.append(previous_message.__class__(content=combined_content))
- return collapsed_messages
-
- def next(
- self,
- messages: List[Message],
- prompt: Optional[str] = None,
- *,
- step_name: str,
- ) -> List[Message]:
- """
- Advances the conversation by sending message history
- to LLM and updating with the response.
-
- Parameters
- ----------
- messages : List[Message]
- The list of messages in the conversation.
- prompt : Optional[str], optional
- The prompt to use, by default None.
- step_name : str
- The name of the step.
-
- Returns
- -------
- List[Message]
- The updated list of messages in the conversation.
- """
-
- if prompt:
- messages.append(HumanMessage(content=prompt))
-
- logger.debug(
- "Creating a new chat completion: %s",
- "\n".join([m.pretty_repr() for m in messages]),
- )
-
- if not self.vision:
- messages = self._collapse_text_messages(messages)
-
- response = self.backoff_inference(messages)
-
- self.token_usage_log.update_log(
- messages=messages, answer=response.content, step_name=step_name
- )
- messages.append(response)
- logger.debug(f"Chat completion finished: {messages}")
-
- return messages
-
- @backoff.on_exception(backoff.expo, openai.RateLimitError, max_tries=7, max_time=45)
- def backoff_inference(self, messages):
- """
- Perform inference using the language model while implementing an exponential backoff strategy.
-
- This function will retry the inference in case of a rate limit error from the OpenAI API.
- It uses an exponential backoff strategy, meaning the wait time between retries increases
- exponentially. The function will attempt to retry up to 7 times within a span of 45 seconds.
-
- Parameters
- ----------
- messages : List[Message]
- A list of chat messages which will be passed to the language model for processing.
-
- callbacks : List[Callable]
- A list of callback functions that are triggered after each inference. These functions
- can be used for logging, monitoring, or other auxiliary tasks.
-
- Returns
- -------
- Any
- The output from the language model after processing the provided messages.
-
- Raises
- ------
- openai.error.RateLimitError
- If the number of retries exceeds the maximum or if the rate limit persists beyond the
- allotted time, the function will ultimately raise a RateLimitError.
-
- Example
- -------
- >>> messages = [SystemMessage(content="Hello"), HumanMessage(content="How's the weather?")]
- >>> response = backoff_inference(messages)
- """
- return self.llm.invoke(messages) # type: ignore
-
- @staticmethod
- def serialize_messages(messages: List[Message]) -> str:
- """
- Serialize a list of messages to a JSON string.
-
- Parameters
- ----------
- messages : List[Message]
- The list of messages to serialize.
-
- Returns
- -------
- str
- The serialized messages as a JSON string.
- """
- return json.dumps(messages_to_dict(messages))
-
- @staticmethod
- def deserialize_messages(jsondictstr: str) -> List[Message]:
- """
- Deserialize a JSON string to a list of messages.
-
- Parameters
- ----------
- jsondictstr : str
- The JSON string to deserialize.
-
- Returns
- -------
- List[Message]
- The deserialized list of messages.
- """
- data = json.loads(jsondictstr)
- # Modify implicit is_chunk property to ALWAYS false
- # since Langchain's Message schema is stricter
- prevalidated_data = [
- {**item, "tools": {**item.get("tools", {}), "is_chunk": False}}
- for item in data
- ]
- return list(messages_from_dict(prevalidated_data)) # type: ignore
-
- def _create_chat_model(self) -> BaseChatModel:
- """
- Create a chat model with the specified model name and temperature.
-
- Parameters
- ----------
- model : str
- The name of the model to create.
- temperature : float
- The temperature to use for the model.
-
- Returns
- -------
- BaseChatModel
- The created chat model.
- """
- if self.azure_endpoint:
- return AzureChatOpenAI(
- azure_endpoint=self.azure_endpoint,
- openai_api_version=os.getenv(
- "OPENAI_API_VERSION", "2024-05-01-preview"
- ),
- deployment_name=self.model_name,
- openai_api_type="azure",
- streaming=self.streaming,
- callbacks=[StreamingStdOutCallbackHandler()],
- )
- elif "claude" in self.model_name:
- return ChatAnthropic(
- model=self.model_name,
- temperature=self.temperature,
- callbacks=[StreamingStdOutCallbackHandler()],
- streaming=self.streaming,
- max_tokens_to_sample=4096,
- )
- elif self.vision:
- return ChatOpenAI(
- model=self.model_name,
- temperature=self.temperature,
- streaming=self.streaming,
- callbacks=[StreamingStdOutCallbackHandler()],
- max_tokens=4096, # vision models default to low max token limits
- )
- else:
- return ChatOpenAI(
- model=self.model_name,
- temperature=self.temperature,
- streaming=self.streaming,
- callbacks=[StreamingStdOutCallbackHandler()],
- )
-
-
-def serialize_messages(messages: List[Message]) -> str:
- return AI.serialize_messages(messages)
-
-
-class ClipboardAI(AI):
- # Ignore not init superclass
- def __init__(self, **_): # type: ignore
- self.vision = False
- self.token_usage_log = TokenUsageLog("clipboard_llm")
-
- @staticmethod
- def serialize_messages(messages: List[Message]) -> str:
- return "\n\n".join([f"{m.type}:\n{m.content}" for m in messages])
-
- @staticmethod
- def multiline_input():
- print("Enter/Paste your content. Ctrl-D or Ctrl-Z ( windows ) to save it.")
- content = []
- while True:
- try:
- line = input()
- except EOFError:
- break
- content.append(line)
- return "\n".join(content)
-
- def next(
- self,
- messages: List[Message],
- prompt: Optional[str] = None,
- *,
- step_name: str,
- ) -> List[Message]:
- """
- Not yet fully supported
- """
- if prompt:
- messages.append(HumanMessage(content=prompt))
-
- logger.debug(f"Creating a new chat completion: {messages}")
-
- msgs = self.serialize_messages(messages)
- pyperclip.copy(msgs)
- Path("clipboard.txt").write_text(msgs)
- print(
- "Messages copied to clipboard and written to clipboard.txt,",
- len(msgs),
- "characters in total",
- )
-
- response = self.multiline_input()
-
- messages.append(AIMessage(content=response))
- logger.debug(f"Chat completion finished: {messages}")
-
- return messages
+ return jsonify({"error": "Invalid or expired token"}), 400
+
+ except Exception as e:
+ return jsonify({"error": "Internal server error"}), 500
+
+ def _send_verification_email(self, user, new_email):
+ # Implement email sending logic
+ # You can integrate with existing notification system
+ verification_url = f"https://yourapp.com/verify-email-change/{user.email_verification_token}"
+
+ email_content = f"""
+ Hi there,
+
+ You requested to change your email address to this one.
+
+ Please click the link below to verify your new email address:
+ {verification_url}
+
+ This link will expire in 24 hours.
+
+ If you didn't request this change, please ignore this email.
+ """
+
+ # Send email (implement based on your email service)
+ print(f"Verification email sent to {new_email}: {email_content}")
\ No newline at end of file
diff --git a/gpt_engineer/core/api/routers.py b/gpt_engineer/core/api/routers.py
new file mode 100644
index 0000000000..db29dc10f7
--- /dev/null
+++ b/gpt_engineer/core/api/routers.py
@@ -0,0 +1,61 @@
+# gpt-engineer/gpt_engineer/core/api/routers.py
+from flask import Blueprint, request, jsonify
+from ..user_service import UserService
+from ..email_service import EmailService
+from ..database.models import Session  # Import your database session
+
+# Create blueprint
+user_bp = Blueprint('user', __name__, url_prefix='/api/user')
+
+# Initialize services
+db_session = Session() # Create database session
+email_service = EmailService() # Create email service
+user_service = UserService(db_session, email_service) # Initialize user service
+
+@user_bp.route('/change-email', methods=['POST'])
+def change_email():
+ """Request email change"""
+ data = request.get_json()
+
+ if not data:
+ return jsonify({"error": "No data provided"}), 400
+
+ user_id = data.get('user_id')
+ new_email = data.get('new_email')
+ current_password = data.get('current_password')
+
+ if not all([user_id, new_email]):
+ return jsonify({"error": "Missing required fields"}), 400
+
+ success, message = user_service.request_email_change(
+ user_id, new_email, current_password
+ )
+
+ if success:
+ return jsonify({"message": message}), 200
+ else:
+ return jsonify({"error": message}), 400
+
+@user_bp.route('/verify-email-change/<token>', methods=['GET'])
+def verify_email_change(token):
+ """Verify email change"""
+ success, message = user_service.verify_email_change(token)
+
+ if success:
+ return jsonify({"message": message}), 200
+ else:
+ return jsonify({"error": message}), 400
+
+@user_bp.route('/cancel-email-change', methods=['POST'])
+def cancel_email_change():
+ """Cancel pending email change"""
+ data = request.get_json()
+ user_id = data.get('user_id')
+
+ if not user_id:
+ return jsonify({"error": "User ID required"}), 400
+
+ if user_service.cancel_email_change(user_id):
+ return jsonify({"message": "Email change cancelled"}), 200
+ else:
+ return jsonify({"error": "No pending email change found"}), 400
\ No newline at end of file
diff --git a/gpt_engineer/core/base_memory.py b/gpt_engineer/core/base_memory.py
index a491ae0aaf..9ad2f4b9db 100644
--- a/gpt_engineer/core/base_memory.py
+++ b/gpt_engineer/core/base_memory.py
@@ -1,15 +1,56 @@
-"""
-Base Memory Module
-
-This module provides a type alias for a mutable mapping that represents the base memory structure
-used in the GPT Engineer project. The base memory is a mapping from file names (as strings or Path objects)
-to their corresponding code content (as strings).
-
-Type Aliases:
- BaseMemory: A mutable mapping from file names to code content.
-"""
-
-from pathlib import Path
-from typing import MutableMapping, Union
-
-BaseMemory = MutableMapping[Union[str, Path], str]
+# Add user management with email change functionality
+from datetime import datetime, timedelta
+
+
+class User:
+ def __init__(self, user_id: str, email: str, email_verified: bool = False):
+ self.user_id = user_id
+ self.email = email
+ self.email_verified = email_verified
+ self.pending_email = None
+ self.email_verification_token = None
+ self.token_created_at = None
+
+ def request_email_change(self, new_email: str) -> bool:
+ # Validate email format
+ if not self._is_valid_email(new_email):
+ raise ValueError("Invalid email format")
+
+ # Check if email is already in use (you'll need to implement this)
+ if self._email_exists(new_email):
+ raise ValueError("Email already registered")
+
+ # Generate verification token
+ self.email_verification_token = self._generate_verification_token()
+ self.pending_email = new_email
+ self.token_created_at = datetime.utcnow()
+
+ return True
+
+ def verify_email_change(self, token: str) -> bool:
+ if (self.email_verification_token == token and
+ self._is_token_valid()):
+
+ old_email = self.email
+ self.email = self.pending_email
+ self.pending_email = None
+ self.email_verification_token = None
+ self.token_created_at = None
+
+ # Send notifications
+ self._send_email_change_notifications(old_email, self.email)
+ return True
+
+ return False
+
+ def _is_valid_email(self, email: str) -> bool:
+ import re
+ pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
+ return re.match(pattern, email) is not None
+
+ def _generate_verification_token(self) -> str:
+ import secrets
+ return secrets.token_urlsafe(32)
+
+ def _is_token_valid(self) -> bool:
+ if not self.token_created_at:
+ return False
+ expiry_time = self.token_created_at + timedelta(hours=24)
+ return datetime.utcnow() < expiry_time
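+
+
+# Illustrative flow for the class above (assumes the host application provides
+# the _email_exists and _send_email_change_notifications hooks referenced in the
+# methods):
+#   user = User("user-1", "old@example.com")
+#   user.request_email_change("new@example.com")             # stores token + pending email
+#   user.verify_email_change(user.email_verification_token)  # swaps in the new address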
\ No newline at end of file
diff --git a/gpt_engineer/core/database/models.py b/gpt_engineer/core/database/models.py
new file mode 100644
index 0000000000..3eee768467
--- /dev/null
+++ b/gpt_engineer/core/database/models.py
@@ -0,0 +1,41 @@
+# database/models.py
+from sqlalchemy import create_engine, Column, String, Integer, DateTime, Boolean
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+from datetime import datetime
+import os
+
+Base = declarative_base()
+
+# Get database URL from environment or use SQLite as default
+DATABASE_URL = os.getenv('DATABASE_URL', 'sqlite:///gpt_engineer.db')
+engine = create_engine(DATABASE_URL)
+Session = sessionmaker(bind=engine)
+
+class User(Base):
+ __tablename__ = "users"
+
+ id = Column(Integer, primary_key=True)
+ email = Column(String(255), unique=True, nullable=False)
+ password_hash = Column(String(255))
+ email_verified = Column(Boolean, default=False)
+ created_at = Column(DateTime, default=datetime.utcnow)
+
+ # New fields for email change
+ pending_email = Column(String(255), nullable=True)
+ email_verification_token = Column(String(255), nullable=True)
+ token_created_at = Column(DateTime, nullable=True)
+
+class EmailChangeLog(Base):
+ __tablename__ = "email_change_logs"
+
+ id = Column(Integer, primary_key=True)
+ user_id = Column(Integer, nullable=False)
+ old_email = Column(String(255), nullable=False)
+ new_email = Column(String(255), nullable=False)
+ status = Column(String(50), default="pending")
+ created_at = Column(DateTime, default=datetime.utcnow)
+ verified_at = Column(DateTime, nullable=True)
+
+# Create tables
+Base.metadata.create_all(engine)
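+
+# Usage sketch (uses the DATABASE_URL default above, i.e. a local SQLite file):
+#   session = Session()
+#   session.add(User(email="user@example.com"))
+#   session.commit()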
\ No newline at end of file
diff --git a/gpt_engineer/core/email_service.py b/gpt_engineer/core/email_service.py
new file mode 100644
index 0000000000..4c39dfccfb
--- /dev/null
+++ b/gpt_engineer/core/email_service.py
@@ -0,0 +1,91 @@
+import smtplib
+from email.mime.text import MIMEText
+from email.mime.multipart import MIMEMultipart
+import os
+from typing import Optional
+
+class EmailService:
+ def __init__(self):
+ self.smtp_server = os.getenv("SMTP_SERVER", "smtp.gmail.com")
+ self.smtp_port = int(os.getenv("SMTP_PORT", 587))
+ self.smtp_username = os.getenv("SMTP_USERNAME")
+ self.smtp_password = os.getenv("SMTP_PASSWORD")
+ self.from_email = os.getenv("FROM_EMAIL", "noreply@gptengineer.app")
+
+ def send_verification_email(self, new_email: str, token: str, user_id: int):
+ """Send verification email to new email address"""
+ verification_url = f"https://yourapp.com/verify-email-change/{token}"
+
+ subject = "Verify Your New Email Address"
+ body = f"""
+ Hi there,
+
+ You have requested to change your email address to this one.
+
+ Please click the link below to verify your new email address:
+ {verification_url}
+
+ This link will expire in 24 hours.
+
+ If you didn't request this change, please ignore this email and contact support.
+
+ Best regards,
+ GPT Engineer Team
+ """
+
+ self._send_email(new_email, subject, body)
+
+ def send_change_notification_old_email(self, old_email: str, new_email: str):
+ """Send notification to old email address"""
+ subject = "Important: Your Email Address is Being Changed"
+ body = f"""
+ Hi there,
+
+ This is to notify you that your email address is being changed from {old_email} to {new_email}.
+
+ If you didn't make this change, please contact support immediately.
+
+ Best regards,
+ GPT Engineer Team
+ """
+
+ self._send_email(old_email, subject, body)
+
+ def send_change_notification_new_email(self, new_email: str):
+ """Send welcome notification to new email address"""
+ subject = "Email Change Successful"
+ body = f"""
+ Hi there,
+
+ Your email address has been successfully updated to {new_email}.
+
+ You can now use this email to log into your account.
+
+ Best regards,
+ GPT Engineer Team
+ """
+
+ self._send_email(new_email, subject, body)
+
+ def _send_email(self, to_email: str, subject: str, body: str):
+ """Internal method to send email"""
+ try:
+ if not all([self.smtp_username, self.smtp_password]):
+ print(f"Email not sent (SMTP not configured): {subject} to {to_email}")
+ return
+
+ msg = MIMEMultipart()
+ msg['From'] = self.from_email
+ msg['To'] = to_email
+ msg['Subject'] = subject
+
+ msg.attach(MIMEText(body, 'plain'))
+
+ with smtplib.SMTP(self.smtp_server, self.smtp_port) as server:
+ server.starttls()
+ server.login(self.smtp_username, self.smtp_password)
+ server.send_message(msg)
+
+ print(f"Email sent: {subject} to {to_email}")
+ except Exception as e:
+ print(f"Failed to send email to {to_email}: {str(e)}")
\ No newline at end of file
diff --git a/gpt_engineer/core/project_config.py b/gpt_engineer/core/project_config.py
index 137a5558c8..523e09b720 100644
--- a/gpt_engineer/core/project_config.py
+++ b/gpt_engineer/core/project_config.py
@@ -1,158 +1,10 @@
-"""
-Functions for reading and writing the `gpt-engineer.toml` configuration file.
-
-The `gpt-engineer.toml` file is a TOML file that contains project-specific configuration used by the GPT Engineer CLI and gptengineer.app.
-"""
-from dataclasses import asdict, dataclass, field
-from pathlib import Path
-
-import tomlkit
-
-default_config_filename = "gpt-engineer.toml"
-
-example_config = """
-[run]
-build = "npm run build"
-test = "npm run test"
-lint = "quick-lint-js"
-
-[paths]
-base = "./frontend" # base directory to operate in (for monorepos)
-src = "./src" # source directory (under the base directory) from which context will be retrieved
-
-[gptengineer-app] # this namespace is used for gptengineer.app, may be used for internal experiments
-project_id = "..."
-
-# we support multiple OpenAPI schemas, used as context for the LLM
-openapi = [
- { url = "https://api.gptengineer.app/openapi.json" },
- { url = "https://some-color-translating-api/openapi.json" },
-]
-"""
-
-
-@dataclass
-class _PathsConfig:
- base: str | None = None
- src: str | None = None
-
-
-@dataclass
-class _RunConfig:
- build: str | None = None
- test: str | None = None
- lint: str | None = None
- format: str | None = None
-
-
-@dataclass
-class _OpenApiConfig:
- url: str
-
-
-@dataclass
-class _GptEngineerAppConfig:
- project_id: str
- openapi: list[_OpenApiConfig] | None = None
-
-
-def filter_none(d: dict) -> dict:
- # Drop None values and empty dictionaries from a dictionary
- return {
- k: v
- for k, v in (
- (k, filter_none(v) if isinstance(v, dict) else v)
- for k, v in d.items()
- if v is not None
- )
- if not (isinstance(v, dict) and not v) # Check for non-empty after filtering
- }
-
-
-@dataclass
-class Config:
- """Configuration for the GPT Engineer CLI and gptengineer.app via `gpt-engineer.toml`."""
-
- paths: _PathsConfig = field(default_factory=_PathsConfig)
- run: _RunConfig = field(default_factory=_RunConfig)
- gptengineer_app: _GptEngineerAppConfig | None = None
-
- @classmethod
- def from_toml(cls, config_file: Path | str):
- if isinstance(config_file, str):
- config_file = Path(config_file)
- config_dict = read_config(config_file)
- return cls.from_dict(config_dict)
-
- @classmethod
- def from_dict(cls, config_dict: dict):
- run = _RunConfig(**config_dict.get("run", {}))
- paths = _PathsConfig(**config_dict.get("paths", {}))
-
- # load optional gptengineer-app section
- gptengineer_app_dict = config_dict.get("gptengineer-app", {})
- gptengineer_app = None
- if gptengineer_app_dict:
- assert (
- "project_id" in gptengineer_app_dict
- ), "project_id is required in gptengineer-app section"
- gptengineer_app = _GptEngineerAppConfig(
- # required if gptengineer-app section is present
- project_id=gptengineer_app_dict["project_id"],
- openapi=[
- _OpenApiConfig(**openapi)
- for openapi in gptengineer_app_dict.get("openapi", [])
- ]
- or None,
- )
-
- return cls(paths=paths, run=run, gptengineer_app=gptengineer_app)
-
- def to_dict(self) -> dict:
- d = asdict(self)
- d["gptengineer-app"] = d.pop("gptengineer_app", None)
-
- # Drop None values and empty dictionaries
- # Needed because tomlkit.dumps() doesn't handle None values,
- # and we don't want to write empty sections.
- d = filter_none(d)
-
- return d
-
- def to_toml(self, config_file: Path | str, save=True) -> str:
- """Write the configuration to a TOML file."""
- if isinstance(config_file, str):
- config_file = Path(config_file)
-
- # Load the TOMLDocument and overwrite it with the new values
- config = read_config(config_file)
- default_config = Config().to_dict()
- for k, v in self.to_dict().items():
- # only write values that are already explicitly set, or that differ from defaults
- if k in config or v != default_config[k]:
- if isinstance(v, dict):
- config[k] = {
- k2: v2
- for k2, v2 in v.items()
- if (
- k2 in config[k]
- or default_config.get(k) is None
- or v2 != default_config[k].get(k2)
- )
- }
- else:
- config[k] = v
-
- toml_str = tomlkit.dumps(config)
- if save:
- with open(config_file, "w") as f:
- f.write(toml_str)
-
- return toml_str
-
-
-def read_config(config_file: Path) -> tomlkit.TOMLDocument:
- """Read the configuration file"""
- assert config_file.exists(), f"Config file {config_file} does not exist"
- with open(config_file, "r") as f:
- return tomlkit.load(f)
+class ProjectConfig:
+ def __init__(self):
+ self.allow_email_changes = True
+ self.email_verification_required = True
+ self.email_token_expiry_hours = 24
+
+ def validate_email_change_config(self):
+ # Add configuration validation
+ if not hasattr(self, 'smtp_server') and self.allow_email_changes:
+ print("Warning: Email changes enabled but SMTP server not configured")
\ No newline at end of file
diff --git a/gpt_engineer/core/user_service.py b/gpt_engineer/core/user_service.py
new file mode 100644
index 0000000000..54f384c421
--- /dev/null
+++ b/gpt_engineer/core/user_service.py
@@ -0,0 +1,156 @@
+import re
+import secrets
+from datetime import datetime, timedelta
+from typing import Optional, Tuple
+from .database.models import User, EmailChangeLog
+
+class UserService:
+ def __init__(self, db_session, email_service):
+ self.db = db_session
+ self.email_service = email_service
+
+ def request_email_change(self, user_id: int, new_email: str, current_password: str) -> Tuple[bool, str]:
+ """Initiate email change process"""
+ try:
+ # Get user
+ user = self.db.query(User).filter(User.id == user_id).first()
+ if not user:
+ return False, "User not found"
+
+ # Verify current password (if applicable)
+ if hasattr(user, 'verify_password'):
+ if not user.verify_password(current_password):
+ return False, "Current password is incorrect"
+
+ # Validate new email format
+ if not self._is_valid_email(new_email):
+ return False, "Invalid email format"
+
+ # Check if new email is same as current
+ if user.email.lower() == new_email.lower():
+ return False, "New email cannot be the same as current email"
+
+ # Check if email already exists
+ existing_user = self.db.query(User).filter(User.email == new_email).first()
+ if existing_user and existing_user.id != user_id:
+ return False, "Email already registered"
+
+ # Generate verification token
+ token = secrets.token_urlsafe(32)
+
+ # Update user record
+ user.pending_email = new_email
+ user.email_verification_token = token
+ user.token_created_at = datetime.utcnow()
+
+ # Log the email change request
+ change_log = EmailChangeLog(
+ user_id=user_id,
+ old_email=user.email,
+ new_email=new_email,
+ status="pending"
+ )
+ self.db.add(change_log)
+ self.db.commit()
+
+ # Send verification email
+ self.email_service.send_verification_email(new_email, token, user_id)
+
+ # Send notification to old email
+ self.email_service.send_change_notification_old_email(user.email, new_email)
+
+ return True, "Verification email sent to your new email address"
+
+ except Exception as e:
+ self.db.rollback()
+ return False, f"Error: {str(e)}"
+
+ def verify_email_change(self, token: str) -> Tuple[bool, str]:
+ """Verify and complete email change"""
+ try:
+ user = self.db.query(User).filter(
+ User.email_verification_token == token
+ ).first()
+
+ if not user:
+ return False, "Invalid or expired verification token"
+
+ # Check token expiration (24 hours)
+ if not user.token_created_at or \
+ datetime.utcnow() > user.token_created_at + timedelta(hours=24):
+ return False, "Verification token has expired"
+
+ if not user.pending_email:
+ return False, "No pending email change found"
+
+ # Update user email
+ old_email = user.email
+ user.email = user.pending_email
+ user.pending_email = None
+ user.email_verification_token = None
+ user.token_created_at = None
+ user.email_verified = True
+
+ # Update email change log
+ change_log = self.db.query(EmailChangeLog).filter(
+ EmailChangeLog.user_id == user.id,
+ EmailChangeLog.new_email == user.email,
+ EmailChangeLog.status == "pending"
+ ).first()
+
+ if change_log:
+ change_log.status = "verified"
+ change_log.verified_at = datetime.utcnow()
+
+ self.db.commit()
+
+ # Send success notification to new email
+ self.email_service.send_change_notification_new_email(user.email)
+
+ return True, "Email updated successfully"
+
+ except Exception as e:
+ self.db.rollback()
+ return False, f"Error: {str(e)}"
+
+ def cancel_email_change(self, user_id: int) -> bool:
+ """Cancel pending email change"""
+ try:
+ user = self.db.query(User).filter(User.id == user_id).first()
+ if user and user.pending_email:
+ user.pending_email = None
+ user.email_verification_token = None
+ user.token_created_at = None
+ self.db.commit()
+ return True
+ return False
+ except Exception:
+ self.db.rollback()
+ return False
+
+ def _is_valid_email(self, email: str) -> bool:
+ """Validate email format"""
+ pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
+ return re.match(pattern, email) is not None
+
+ def cleanup_expired_tokens(self):
+ """Clean up expired verification tokens"""
+ try:
+ expiry_time = datetime.utcnow() - timedelta(hours=24)
+
+ # Find users with expired tokens
+ expired_users = self.db.query(User).filter(
+ User.token_created_at < expiry_time,
+ User.pending_email.isnot(None)
+ ).all()
+
+ for user in expired_users:
+ user.pending_email = None
+ user.email_verification_token = None
+ user.token_created_at = None
+
+ self.db.commit()
+ print(f"Cleaned up {len(expired_users)} expired tokens")
+
+ except Exception as e:
+ print(f"Error cleaning up tokens: {str(e)}")
\ No newline at end of file
diff --git a/main/app.py b/main/app.py
new file mode 100644
index 0000000000..36a25579ee
--- /dev/null
+++ b/main/app.py
@@ -0,0 +1,16 @@
+from flask import Flask
+from gpt_engineer.core.api.routers import user_bp
+from gpt_engineer.core.user_service import UserService
+from gpt_engineer.core.email_service import EmailService
+
+app = Flask(__name__)
+
+# Register blueprints
+app.register_blueprint(user_bp)
+
+# Initialize services
+email_service = EmailService()
+# Initialize your database session here
+
+if __name__ == "__main__":
+ app.run(debug=True)
\ No newline at end of file