diff --git a/CODE_STRUCTURE.md b/CODE_STRUCTURE.md
new file mode 100644
index 0000000..bd1745d
--- /dev/null
+++ b/CODE_STRUCTURE.md
@@ -0,0 +1,108 @@
+# Project Code Structure
+
+## Main application file: `1min-relay/app.py`
+- Initializes the Flask application and starts the server
+- Imports all required modules
+- Configures server settings
+
+## Utilities
+
+### Common utilities: `1min-relay/utils/common.py`
+- `ERROR_HANDLER`: Creates standardized error responses with appropriate status codes
+- `handle_options_request`: Handles OPTIONS requests for CORS
+- `set_response_headers`: Sets response headers for CORS
+- `create_session`: Creates a session for API requests
+- `api_request`: Makes requests to external APIs with error handling
+- `safe_temp_file`: Creates a temporary file with proper resource management
+- `calculate_token`: Calculates the number of tokens in a text using tiktoken
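+
+As an illustration of the token counting done here, a minimal sketch using tiktoken (`count_tokens` is an illustrative name, not the relay's actual `calculate_token` signature):
+
+```python
+import tiktoken
+
+def count_tokens(text: str, model: str = "gpt-4") -> int:
+    # Resolve the tokenizer for the model, then count the encoded tokens
+    encoding = tiktoken.encoding_for_model(model)
+    return len(encoding.encode(text))
+
+print(count_tokens("Hello, world!"))  # prints a small integer, e.g. 4
+```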
+
+### Constants: `1min-relay/utils/constants.py`
+- `ENDPOINTS`: Dictionary of API endpoints
+- `ROLES_MAPPING`: Mapping of roles for different models
+- `MODEL_CAPABILITIES`: Dictionary of model capabilities
+- Various other constants used throughout the application
+
+### Imports: `1min-relay/utils/imports.py`
+- Central place for all standard library imports
+- Imports used across multiple modules
+
+### Logger: `1min-relay/utils/logger.py`
+- `logger`: Configured logger instance
+- Functions for setting up and using the logger
+
+### Memcached: `1min-relay/utils/memcached.py`
+- `MEMORY_STORAGE`: Dictionary for temporary in-memory storage
+- `safe_memcached_operation`: Safely performs operations on memcached
+- `delete_all_files_task`: Periodically deletes stale user files
+
+## Functions
+
+### Functions initialization: `1min-relay/routes/functions/__init__.py`
+- Explicitly exports all required functions from the submodules
+- Groups and documents functions by category
+- Provides convenient imports of functions in the routes
+
+### Shared functions: `1min-relay/routes/functions/shared_func.py`
+- `validate_auth`: Validates the authorization header
+- `handle_api_error`: Standardized error handling for API responses
+- `format_openai_response`: Formats responses to match the OpenAI API
+- `format_image_response`: Formats image responses to match the OpenAI API
+- `stream_response`: Streams API responses
+- `get_full_url`: Builds a full URL from a relative path
+- `extract_data_from_api_response`: Common function for extracting data from API responses
+- `extract_text_from_response`: Extracts text from API responses
+- `extract_image_urls`: Extracts image URLs from API responses
+- `extract_audio_url`: Extracts the audio URL from API responses
+
+### Text functions: `1min-relay/routes/functions/txt_func.py`
+- `format_conversation_history`: Formats the conversation history for models
+- `get_model_capabilities`: Gets capability information for a model
+- `prepare_payload`: Prepares the payload for API requests
+- `transform_response`: Transforms API responses
+- `emulate_stream_response`: Emulates a streaming response
+- `streaming_request`: Handles streaming requests to the API
+
+### Image functions: `1min-relay/routes/functions/img_func.py`
+- `build_generation_payload`: Builds the payload for image generation
+- `parse_aspect_ratio`: Parses the aspect ratio from the input
+- `create_image_variations`: Creates variations of images
+- `retry_image_upload`: Retries an image upload on failure
+
+### Audio functions: `1min-relay/routes/functions/audio_func.py`
+- `upload_audio_file`: Uploads audio files
+- `try_models_in_sequence`: Tries different models one after another
+- `prepare_models_list`: Prepares the list of models to try
+- `prepare_whisper_payload`: Prepares the payload for the Whisper API
+- `prepare_tts_payload`: Prepares the payload for text-to-speech
+
+### File functions: `1min-relay/routes/functions/file_func.py`
+- `get_user_files`: Gets user files from Memcached
+- `save_user_files`: Saves user files to Memcached
+- `upload_asset`: Uploads assets to the server
+- `get_mime_type`: Gets the MIME type of a file
+- `format_file_response`: Formats a file response in the OpenAI format
+- `create_api_response`: Creates an HTTP response with the proper headers
+- `find_file_by_id`: Finds a file by ID in the user's file list
+- `find_conversation_id`: Finds the conversation ID in an API response
+- `create_conversation_with_files`: Creates a new conversation with files
+
+## Routes
+
+### Text routes: `1min-relay/routes/text.py`
+- `/v1/models`: Returns the list of available models
+- `/v1/chat/completions`: Handles chat completion requests
+- Various other text model endpoints
+
+### Image routes: `1min-relay/routes/images.py`
+- `/v1/images/generations`: Generates images from text
+- `/v1/images/variations`: Creates variations of images
+
+### Audio routes: `1min-relay/routes/audio.py`
+- `/v1/audio/transcriptions`: Transcribes audio to text
+- `/v1/audio/translations`: Translates audio into another language
+- `/v1/audio/speech`: Converts text to speech
+
+### File routes: `1min-relay/routes/files.py`
+- `/v1/files`: Handles file upload and management
+- `/v1/files/<file_id>`: Gets or deletes a specific file
+- `/v1/files/<file_id>/content`: Gets the content of a file
diff --git a/CODE_STRUCTURE_EN.md b/CODE_STRUCTURE_EN.md
new file mode 100644
index 0000000..e12f681
--- /dev/null
+++ b/CODE_STRUCTURE_EN.md
@@ -0,0 +1,108 @@
+# Project Code Structure
+
+## Main application file: `1min-relay/app.py`
+- Initializes the Flask app and starts the server
+- Imports all the necessary modules
+- Configures server settings
+
+## Utilities
+
+### Common Utilities: `1min-relay/utils/common.py`
+- `ERROR_HANDLER`: Creates standardized error responses with appropriate status codes
+- `handle_options_request`: Handles OPTIONS requests for CORS preflight
+- `set_response_headers`: Sets response headers for CORS
+- `create_session`: Creates a session for API requests
+- `api_request`: Makes requests to external APIs with error handling
+- `safe_temp_file`: Creates a temporary file with proper resource management
+- `calculate_token`: Calculates the number of tokens in a text using tiktoken
+
+### Constants: `1min-relay/utils/constants.py`
+- `ENDPOINTS`: Dictionary of API endpoints
+- `ROLES_MAPPING`: Mapping of roles for different models
+- `MODEL_CAPABILITIES`: Dictionary of model capabilities
+- Various other constants used throughout the application
+
+### Imports: `1min-relay/utils/imports.py`
+- Central place for all standard library imports
+- Imports used across multiple modules
+
+### Logger: `1min-relay/utils/logger.py`
+- `logger`: Configured logging instance
+- Functions for setting up and using the logger
+
+### Memcached: `1min-relay/utils/memcached.py`
+- `MEMORY_STORAGE`: Dictionary for temporary storage
+- `safe_memcached_operation`: Safely performs operations on memcached
+- `delete_all_files_task`: Periodically cleans up outdated user files
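+
+A minimal sketch of the fallback pattern used here, assuming pymemcache is installed; the names below are illustrative, not the module's exact implementation:
+
+```python
+from pymemcache.client.base import Client
+
+MEMORY_STORAGE = {}  # in-process fallback storage
+
+try:
+    client = Client(("127.0.0.1", 11211), connect_timeout=1)
+    client.set("ping", "ok")  # probe the connection
+except Exception:
+    client = None  # memcached unavailable, fall back to the dictionary
+
+def safe_set(key: str, value: str) -> None:
+    # Prefer memcached; degrade gracefully to in-memory storage
+    if client is not None:
+        client.set(key, value)
+    else:
+        MEMORY_STORAGE[key] = value
+```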
+
+## Functions
+
+### Functions initialization: `1min-relay/routes/functions/__init__.py`
+- Clear export of all necessary functions from submodules
+- Grouping and documenting functions by categories
+- Provides convenient import of functions in routes
+
+### Shared functions: `1min-relay/routes/functions/shared_func.py`
+- `validate_auth`: Validates the authorization header
+- `handle_api_error`: Standardized error handling for API responses
+- `format_openai_response`: Formats responses to match OpenAI API
+- `format_image_response`: Formats image responses to match OpenAI API
+- `stream_response`: Streams API responses
+- `get_full_url`: Creates a full URL from a relative path
+- `extract_data_from_api_response`: Common function for extracting data from API responses
+- `extract_text_from_response`: Extracts text from API responses
+- `extract_image_urls`: Extracts image URLs from API responses
+- `extract_audio_url`: Extracts audio URL from API responses
+
+### Text functions: `1min-relay/routes/functions/txt_func.py`
+- `format_conversation_history`: Formats conversation history for models
+- `get_model_capabilities`: Gets capability information for a model
+- `prepare_payload`: Prepares the payload for API requests
+- `transform_response`: Transforms API responses
+- `emulate_stream_response`: Emulates a streaming response
+- `streaming_request`: Handles streaming requests to the API
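+
+The history formatting follows the pattern below, a simplified sketch of the `format_conversation_history` logic preserved in `backup/old/main.py`:
+
+```python
+def format_history(messages: list, new_input: str) -> str:
+    # Render each message as "Role: content", one message per line
+    lines = []
+    for message in messages:
+        role = message.get("role", "")
+        content = message.get("content", "")
+        lines.append(f"{role.capitalize()}: {content}")
+    if new_input:
+        lines.append(f"User: {new_input}")
+    return "\n".join(lines)
+
+print(format_history(
+    [{"role": "system", "content": "Be brief."},
+     {"role": "user", "content": "Hi!"}],
+    "What can you do?",
+))
+```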
+
+### Image functions: `1min-relay/routes/functions/img_func.py`
+- `build_generation_payload`: Builds the payload for image generation
+- `parse_aspect_ratio`: Parses the aspect ratio from input
+- `create_image_variations`: Creates variations of images
+- `retry_image_upload`: Retries image upload on failure
+
+### Audio functions: `1min-relay/routes/functions/audio_func.py`
+- `upload_audio_file`: Uploads audio files
+- `try_models_in_sequence`: Tries different models sequentially
+- `prepare_models_list`: Prepares a list of models to try
+- `prepare_whisper_payload`: Prepares payload for Whisper API
+- `prepare_tts_payload`: Prepares payload for text-to-speech
+
+### File functions: `1min-relay/routes/functions/file_func.py`
+- `get_user_files`: Gets user files from Memcached
+- `save_user_files`: Saves user files to Memcached
+- `upload_asset`: Uploads assets to the server
+- `get_mime_type`: Gets the MIME type of a file
+- `format_file_response`: Formats file response in OpenAI format
+- `create_api_response`: Creates HTTP response with proper headers
+- `find_file_by_id`: Finds file by ID in user's files list
+- `find_conversation_id`: Finds conversation ID in API response
+- `create_conversation_with_files`: Creates a new conversation with files
+
+## Routes
+
+### Text routes: `1min-relay/routes/text.py`
+- `/v1/models`: Returns a list of available models
+- `/v1/chat/completions`: Handles chat completion requests
+- Various other text model endpoints
+
+### Image routes: `1min-relay/routes/images.py`
+- `/v1/images/generations`: Generates images from text
+- `/v1/images/variations`: Creates variations of images
+
+### Audio routes: `1min-relay/routes/audio.py`
+- `/v1/audio/transcriptions`: Transcribes audio to text
+- `/v1/audio/translations`: Translates audio to another language
+- `/v1/audio/speech`: Converts text to speech
+
+### File routes: `1min-relay/routes/files.py`
+- `/v1/files`: Handles file upload and management
+- `/v1/files/<file_id>`: Gets or deletes a specific file
+- `/v1/files/<file_id>/content`: Gets file content
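+
+Assuming the relay mirrors OpenAI's multipart upload format, exercising the file routes might look like this (the API key and file name are placeholders):
+
+```python
+import requests
+
+BASE_URL = "http://localhost:5001/v1"
+HEADERS = {"Authorization": "Bearer your-1min-api-key"}
+
+# Upload a document, then list the stored files
+with open("notes.pdf", "rb") as f:
+    uploaded = requests.post(
+        f"{BASE_URL}/files",
+        headers=HEADERS,
+        files={"file": f},
+        data={"purpose": "assistants"},
+    ).json()
+
+listing = requests.get(f"{BASE_URL}/files", headers=HEADERS).json()
+print(uploaded, listing)
+```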
diff --git a/Dockerfile b/Dockerfile
index 154f31d..fb8fcc0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,9 +5,14 @@ WORKDIR /app
 COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
+RUN pip install python-memcached
 COPY . .
+# Create a directory for temporary files
+RUN mkdir -p temp
+
 EXPOSE 5001
-CMD ["python", "main.py"]
+# Switch the entrypoint from main.py to app.py
+CMD ["python", "app.py"]
diff --git a/INSTALL.sh b/INSTALL.sh
new file mode 100644
index 0000000..2e549c9
--- /dev/null
+++ b/INSTALL.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+###################
+# run with sudo
+apt update
+apt install python3 python3-venv python3-pip -y
+apt install memcached libmemcached-tools -y
+systemctl enable memcached
+systemctl start memcached
+###################
+python3 -m venv venv
+source venv/bin/activate
+pip install --upgrade pip
+pip install -r requirements.txt
diff --git a/README.md b/README.md
index fd5cadd..1e1fab6 100644
--- a/README.md
+++ b/README.md
@@ -1,175 +1,151 @@
-# 1min-Relay
-Relay 1min AI API to OpenAI Structure in 1 minute.
-
-Don't forget to star this repository if you like it!
-
-Oh and by the way, join our discord server for support and updates!
-[https://discord.gg/GQd3DrxXyj](https://discord.gg/GQd3DrxXyj)
-
-## Features
-- **bolt.diy Support**: Compatible with bolt.diy for seamless integration.
-- **Conversation History**: Retain and manage conversation history effortlessly.
-- **Broad Client Compatibility**: Works with most clients supporting OpenAI Custom Endpoint.
-- **Fast and Reliable Relay**: Relays 1min AI API to OpenAI-compatible structure within 1 minute.
-- **User-Friendly**: Easy to set up and quick to use.
-- **Model Exposure Control**: Expose all or a predefined subset of 1min.ai-supported models.
-- **Streaming Support**: Enables real-time streaming for faster interactions.
-- **Non-Streaming Support**: Compatible with non-streaming workflows.
-- **Docker Support**: Deploy easily with Docker for fast and consistent setup.
-- **Multi-Document Support**: Supports document uploads for enhanced functionality. Some include .docx, .pdf, .txt, .yaml and etc.
-- **Image Support**: Supports image uploads for enhanced functionality.
-- **ARM64 and AMD64 Support**: Compatible with both ARM64 and AMD64 architectures.
-- **Multiple Requests**: Supports multiple requests simultaneously.
-
-
-## Installation:
-
-Clone the git repo into your machine with:
-```bash
-git clone https://github.com/kokofixcomputers/1min-relay.git
-```
-
-### Bare-metal
-
-To install dependencies, run:
-```bash
+# 1min-relay
+
+## Project Description
+1min-relay is a proxy server that implements an OpenAI-compatible API for working with various AI models through the 1min.ai service. It lets client applications that support the OpenAI API use models from different providers through a single interface.
+
+## Features
+- Fully compatible with the OpenAI API, including chat/completions, images, audio, and files
+- Supports a large number of models from various providers: OpenAI, Claude, Mistral, Google, and others
+- Works with different types of requests: text, images, audio, and files
+- Implements data streaming
+- Provides rate limiting backed by Memcached
+- Lets you define a subset of permitted models via environment variables
+- Optimized modular structure with minimal code duplication
+
+## Project Structure
+The project has a modular structure to make development and maintenance easier:
+
+```
+1min-relay/
+├── app.py            # Main application file - server initialization and settings
+├── utils/            # Common utilities and modules
+│   ├── __init__.py   # Package initialization
+│   ├── common.py     # Common helper functions
+│   ├── constants.py  # Constants and configuration variables
+│   ├── imports.py    # Centralized imports
+│   ├── logger.py     # Logging setup
+│   └── memcached.py  # Functions for working with Memcached
+├── routes/           # API routes
+│   ├── __init__.py   # Routes module initialization
+│   ├── text.py       # Routes for text requests
+│   ├── images.py     # Routes for working with images
+│   ├── audio.py      # Routes for audio requests
+│   ├── files.py      # Routes for working with files
+│   └── functions/    # Helper functions for different types of requests
+│       ├── __init__.py   # Functions package initialization
+│       ├── shared_func.py# Common helper functions for all request types
+│       ├── txt_func.py   # Helper functions for text models
+│       ├── img_func.py   # Helper functions for working with images
+│       ├── audio_func.py # Helper functions for working with audio
+│       └── file_func.py  # Helper functions for working with files
+├── requirements.txt  # Project dependencies
+├── INSTALL.sh        # Local installation script (venv)
+├── RUN.sh            # Local launch script (venv)
+├── UPDATE.sh         # Docker container update script
+├── Dockerfile        # Instructions for building the Docker image
+├── CODE_STRUCTURE.md # Detailed information about the code structure
+└── README.md         # Project documentation
+```
+
+### Key Components:
+
+- **app.py**: The main application file, which initializes the server, configures settings, and creates the Flask application.
+
+- **utils/**: Contains the core utility modules that provide base functionality:
+  - common.py: Common helper functions used throughout the application
+  - constants.py: Defines all constants, configuration variables, and model lists
+  - imports.py: Centralizes imports to avoid circular dependencies
+  - logger.py: Configures logging for the application
+  - memcached.py: Provides the rate-limiting functionality
+
+- **routes/**: Contains the main API endpoints that implement OpenAI API compatibility:
+  - text.py: Implements the chat/completions endpoints
+  - images.py: Implements the image generation and processing endpoints
+  - audio.py: Implements the speech-to-text and text-to-speech endpoints
+  - files.py: Implements the file management endpoints
+
+- **routes/functions/**: Contains helper functions that support the main route handlers:
+  - shared_func.py: Common helper functions for all request types
+  - txt_func.py: Helper functions for text models
+  - img_func.py: Helper functions for working with images
+  - audio_func.py: Helper functions for working with audio
+  - file_func.py: Helper functions for working with files
+
+## Requirements
+- Python 3.7+
+- Flask and related libraries
+- Memcached (optional, for rate limiting)
+- A 1min.ai API key
+
+## Installation and Launch
+
+### Installing Dependencies
+```bash
+sudo apt update
+sudo apt install python3 python3-venv python3-pip -y
+```
+```bash
+python3 -m venv venv
+source venv/bin/activate
+pip install --upgrade pip
 pip install -r requirements.txt
 ```
-and then run python3 main.py
-
-Depending on your system, you may need to run `python` instead of `python3`.
-
-### Docker
-Running 1min-relay in docker is the easiet method. Please note that the connection ip address displayed on server start will be wrong when in docker.
-
-#### Pre-Built images
-
-1. Pull the Docker Image:
-```bash
-docker pull kokofixcomputers/1min-relay:latest
+### Setting Environment Variables
+Create a `.env` file in the project root directory:
 ```
-
-2. To encrease security, 1min-relay will require it's own network to be able to communicate with memcached.
-To create a network, run:
-```bash
-docker network create 1min-relay-network
+PORT=5001
+SUBSET_OF_ONE_MIN_PERMITTED_MODELS=gpt-4o-mini,mistral-nemo,claude-3-haiku-20240307,gemini-1.5-flash
+PERMIT_MODELS_FROM_SUBSET_ONLY=false
 ```
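+
+The relay reads these variables at startup roughly as follows (this mirrors the parsing in `app.py`; the fallback list here is illustrative):
+
+```python
+import os
+
+# Comma-separated model list; unset means the built-in default subset
+subset_env = os.getenv("SUBSET_OF_ONE_MIN_PERMITTED_MODELS")
+permitted_models = subset_env.split(",") if subset_env else ["mistral-nemo", "gpt-4o-mini"]
+
+# Anything other than "true" (case-insensitive) leaves the restriction off
+restrict = (os.getenv("PERMIT_MODELS_FROM_SUBSET_ONLY") or "").lower() == "true"
+```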
-3. Run Memcached.
+### Launching the Server
 ```bash
-docker run -d --name memcached --network 1min-relay-network memcached
+sudo apt install memcached libmemcached-tools -y
+sudo systemctl enable memcached
+sudo systemctl start memcached
 ```
-
-4. Run the 1min-relay Container:
 ```bash
-docker run -d --name 1min-relay-container --network 1min-relay-network -p 5001:5001 \
-    -e SUBSET_OF_ONE_MIN_PERMITTED_MODELS="mistral-nemo,gpt-4o-mini,deepseek-chat" \
-    -e PERMIT_MODELS_FROM_SUBSET_ONLY=True \
-    kokofixcomputers/1min-relay:latest
+source venv/bin/activate
+python app.py
 ```
-Environment Variables:
-
-- `SUBSET_OF_ONE_MIN_PERMITTED_MODELS`: Specifies a subset of 1min.ai models to expose. Default: mistral-nemo,gpt-4o,deepseek-chat.
-- `PERMIT_MODELS_FROM_SUBSET_ONLY`: Restricts model usage to the specified subset. Set to True to enforce this restriction or False to allow all models supported by 1min.ai. Default: True.
-
+After launching, the server will be available at `http://localhost:5001/`.
-#### Self-Build
-
-1. Build the Docker Image
-From the project directory (where Dockerfile and main.py reside), run:
+### Automation Scripts: Local Installation (venv), Local Launch (venv), Docker Container Update
 ```bash
-docker build -t 1min-relay:latest .
+chmod +x *.sh
+# python-venv
+sudo ./INSTALL.sh
+./RUN.sh
+# docker (reinstall)
+mv UPDATE.sh ../
+cd ../
+./UPDATE.sh
 ```
-2. To encrease security, 1min-relay will require it's own network to be able to communicate with memcached.
-To create a network, run:
-```bash
-docker network create 1min-relay-network
+## Usage with OpenAI API Clients
+Most OpenAI API clients can be configured to use this server by specifying the base URL:
 ```
-
-3. Run Memcached.
-```bash
-docker run -d --name memcached --network 1min-relay-network memcached
+http://localhost:5001/v1
 ```
-4. Run the 1min-relay Container:
-```bash
-docker run -d --name 1min-relay-container --network 1min-relay-network -p 5001:5001 \
-    -e SUBSET_OF_ONE_MIN_PERMITTED_MODELS="mistral-nemo,gpt-4o-mini,deepseek-chat" \
-    -e PERMIT_MODELS_FROM_SUBSET_ONLY=True \
-    1min-relay:latest
+When sending requests to the API, use your 1min.ai API key in the Authorization header:
 ```
-
-- `-d` runs the container in detached (background) mode.
-- `-p 5001:5001` maps your host's port 5001 to the container's port 5001.
-- `--name 1min-relay-container` is optional, but it makes it easier to stop or remove the container later.
-- `-e`: Specifies environment variables.
-- `SUBSET_OF_ONE_MIN_PERMITTED_MODELS`: Specifies a subset of 1min.ai models to expose. Default: mistral-nemo,gpt-4o,deepseek-chat.
-- `PERMIT_MODELS_FROM_SUBSET_ONLY`: Restricts model usage to the specified subset. Set to True to enforce this restriction or False to allow all models supported by 1min.ai. Default: False.
-
-
-4. Verify It's Running
-Check logs (optional):
-
-```bash
-docker logs -f 1min-relay-container
-```
-You should see your Flask server output: "Server is ready to serve at …"
-
-Test from your host machine:
-
-```bash
-curl -X GET http://localhost:5001/v1/models
+Authorization: Bearer your-1min-api-key
 ```
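+
+For example, with the official `openai` Python package (v1+); the model name must be one your relay exposes:
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+    base_url="http://localhost:5001/v1",
+    api_key="your-1min-api-key",  # your 1min.ai API key
+)
+
+response = client.chat.completions.create(
+    model="gpt-4o-mini",
+    messages=[{"role": "user", "content": "Hello!"}],
+)
+print(response.choices[0].message.content)
+```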
-5. Stopping or Removing the Container
-To stop the container:
-
-```bash
-docker stop 1min-relay-container
-```
-
-To remove the container:
-
-```bash
-docker rm 1min-relay-container
-```
+## Launching with Docker
+You can also run the server in a Docker container:
-To remove the image entirely:
-
-```bash
-docker rmi 1min-relay:latest
-```
-
-Optional: Run with Docker Compose
-If you prefer Docker Compose, you can run the docker compose included with the repo:
-
-Just run:
-
-```bash
-docker compose up -d
-```
-Compose will automatically do these things for you:
-- Create a network
-- Run Memcached
-- Run the 1min-relay Container
-
-#### Managing docker containers
-You can also have multiple instances of 1min-relay running on the same machine with docker.
-
-You just need to chznge the name parameter of the container so you can identify them.
-
-To stop a 1min-relay container, run:
-```bash
-docker stop 1min-relay-container
-```
-To start it again, run:
 ```bash
-docker start 1min-relay-container
+docker run -d --name 1min-relay-container --restart always --network 1min-relay-network -p 5001:5001 \
+  -e SUBSET_OF_ONE_MIN_PERMITTED_MODELS="mistral-nemo,gpt-4o-mini,deepseek-chat" \
+  -e PERMIT_MODELS_FROM_SUBSET_ONLY=False \
+  -e MEMCACHED_HOST=memcached \
+  -e MEMCACHED_PORT=11211 \
+  1min-relay-container:latest
 ```
-### Environment Variables
-- `SUBSET_OF_ONE_MIN_PERMITTED_MODELS`: Specifies a subset of 1min.ai models to expose. Default: mistral-nemo,gpt-4o,deepseek-chat.
-- `PERMIT_MODELS_FROM_SUBSET_ONLY`: Restricts model usage to the specified subset. Set to True to enforce this restriction or False to allow all models supported by 1min.ai. Default: False.
+## License
+[MIT License](LICENSE)
diff --git a/README_EN.md b/README_EN.md
new file mode 100644
index 0000000..b60941e
--- /dev/null
+++ b/README_EN.md
@@ -0,0 +1,151 @@
+# 1min-relay
+
+## Project Description
+1min-relay is a proxy server implementing an API compatible with the OpenAI API for working with various AI models through the 1min.ai service. It allows you to use client applications that support the OpenAI API with models from various providers through a unified interface.
+
+## Features
+- Fully compatible with the OpenAI API, including chat/completions, images, audio, and files
+- Supports a large number of models from various providers: OpenAI, Claude, Mistral, Google, and others
+- Works with different types of requests: text, images, audio, and files
+- Implements data streaming
+- Has a rate limiting function using Memcached
+- Allows you to set a subset of allowed models through environment variables
+- Optimized modular structure with minimal code duplication
+
+## Project Structure
+The project has a modular structure to facilitate development and maintenance:
+
+```
+1min-relay/
+├── app.py            # Main application file - server initialization and settings
+├── utils/            # Common utilities and modules
+│   ├── __init__.py   # Package initialization
+│   ├── common.py     # Common helper functions
+│   ├── constants.py  # Constants and configuration variables
+│   ├── imports.py    # Centralized imports
+│   ├── logger.py     # Logging setup
+│   └── memcached.py  # Functions for working with Memcached
+├── routes/           # API routes
+│   ├── __init__.py   # Routes module initialization
+│   ├── text.py       # Routes for text requests
+│   ├── images.py     # Routes for working with images
+│   ├── audio.py      # Routes for audio requests
+│   ├── files.py      # Routes for working with files
+│   └── functions/    # Helper functions for different types of requests
+│       ├── __init__.py   # Functions package initialization
+│       ├── shared_func.py# Common helper functions for all request types
+│       ├── txt_func.py   # Helper functions for text models
+│       ├── img_func.py   # Helper functions for working with images
+│       ├── audio_func.py # Helper functions for working with audio
+│       └── file_func.py  # Helper functions for working with files
+├── requirements.txt  # Project dependencies
+├── INSTALL.sh        # Local installation script (venv)
+├── RUN.sh            # Local launch script (venv)
+├── UPDATE.sh         # Docker container update script
+├── Dockerfile        # Instructions for building Docker image
+├── CODE_STRUCTURE.md # Detailed information about code structure
+└── README.md         # Project documentation
+```
+
+### Key Components:
+
+- **app.py**: The main application file that initializes the server, configures settings, and sets up the Flask application.
+ +- **utils/**: Contains essential utility modules that provide core functionality: + - common.py: Common helper functions used throughout the application + - constants.py: Defines all constants, configuration variables, and model lists + - imports.py: Centralizes imports to avoid circular dependencies + - logger.py: Configures logging for the application + - memcached.py: Provides rate limiting functionality + +- **routes/**: Contains the main API endpoints that implement the OpenAI API compatibility: + - text.py: Implements chat/completions endpoints + - images.py: Implements image generation and processing endpoints + - audio.py: Implements speech-to-text and text-to-speech endpoints + - files.py: Implements file management endpoints + +- **routes/functions/**: Contains helper functions that support the main route handlers: + - shared_func.py: Common helper functions for all request types + - txt_func.py: Helper functions for text models + - img_func.py: Helper functions for working with images + - audio_func.py: Helper functions for working with audio + - file_func.py: Helper functions for working with files + +## Requirements +- Python 3.7+ +- Flask and related libraries +- Memcached (optional for rate limiting) +- 1min.ai service API key + +## Installation and Launch + +### Installing Dependencies +```bash +sudo apt update +sudo apt install python3 python3-venv python3-pip -y +``` +```bash +python3 -m venv venv +source venv/bin/activate +pip install --upgrade pip +pip install -r requirements.txt +``` + +### Environment Variables Setup +Create a `.env` file in the project root directory: +``` +PORT=5001 +SUBSET_OF_ONE_MIN_PERMITTED_MODELS=gpt-4o-mini,mistral-nemo,claude-3-haiku-20240307,gemini-1.5-flash +PERMIT_MODELS_FROM_SUBSET_ONLY=false +``` + +### Server Launch +```bash +sudo apt install memcached libmemcached-tools -y +sudo systemctl enable memcached +sudo systemctl start memcached +``` +```bash +source venv/bin/activate +python app.py +``` +After launching, the server will be available at `http://localhost:5001/`. 
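+
+As a quick smoke test, you can query the models endpoint (a sketch; replace the key placeholder with your own):
+
+```python
+import requests
+
+resp = requests.get(
+    "http://localhost:5001/v1/models",
+    headers={"Authorization": "Bearer your-1min-api-key"},
+)
+print(resp.status_code, [m["id"] for m in resp.json()["data"]])
+```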
+
+### Automation Scripts: Local Installation (venv), Local Launch (venv), Docker Container Update
+
+```bash
+chmod +x *.sh
+# python-venv
+sudo ./INSTALL.sh
+./RUN.sh
+# docker (reinstall)
+mv UPDATE.sh ../
+cd ../
+./UPDATE.sh
+```
+
+## Usage with OpenAI API Clients
+Most OpenAI API clients can be configured to use this server by specifying the base URL:
+```
+http://localhost:5001/v1
+```
+
+When sending requests to the API, use your 1min.ai API key in the Authorization header:
+```
+Authorization: Bearer your-1min-api-key
+```
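+
+Streaming works through the same interface; a sketch with the `openai` package (v1+), assuming the chosen model is exposed by your relay:
+
+```python
+from openai import OpenAI
+
+client = OpenAI(base_url="http://localhost:5001/v1", api_key="your-1min-api-key")
+
+# Tokens arrive incrementally instead of as one final message
+stream = client.chat.completions.create(
+    model="mistral-nemo",
+    messages=[{"role": "user", "content": "Count to five."}],
+    stream=True,
+)
+for chunk in stream:
+    delta = chunk.choices[0].delta.content
+    if delta:
+        print(delta, end="", flush=True)
+```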
+
+## Launching Using Docker
+You can also run the server in a Docker container:
+
+```bash
+docker run -d --name 1min-relay-container --restart always --network 1min-relay-network -p 5001:5001 \
+  -e SUBSET_OF_ONE_MIN_PERMITTED_MODELS="mistral-nemo,gpt-4o-mini,deepseek-chat" \
+  -e PERMIT_MODELS_FROM_SUBSET_ONLY=False \
+  -e MEMCACHED_HOST=memcached \
+  -e MEMCACHED_PORT=11211 \
+  1min-relay-container:latest
+```
+
+## License
+[MIT License](LICENSE)
diff --git a/RUN.sh b/RUN.sh
new file mode 100644
index 0000000..b0aa230
--- /dev/null
+++ b/RUN.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+###################
+source venv/bin/activate
+python app.py
diff --git a/STOP.sh b/STOP.sh
new file mode 100644
index 0000000..c5481c4
--- /dev/null
+++ b/STOP.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+#########################
+# run with sudo
+
+# stop app.py
+killall -9 python3
+killall -9 python
+# stop memcached
+killall memcached
+systemctl kill memcached
+systemctl stop memcached
+
+#########################
+# Stop the containers if they exist
+if docker ps -a | grep -q "1min-relay-container"; then
+    echo "Stopping the 1min-relay-container container..."
+    docker stop 1min-relay-container || true
+fi
+
+if docker ps -a | grep -q "memcached"; then
+    echo "Stopping the memcached container..."
+    docker stop memcached || true
+fi
+
+echo "1min-relay stopped!"
diff --git a/UPDATE.sh b/UPDATE.sh
new file mode 100644
index 0000000..ca0c36a
--- /dev/null
+++ b/UPDATE.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+#########################
+# MOVE ME TO 1 LEVEL UP #
+#=======================#
+# mv UPDATE.sh ../      #
+# cd ../                #
+# chmod +x UPDATE.sh    #
+# ./UPDATE.sh           #
+#########################
+# run with sudo
+set -eu
+
+# Remove the old directory
+rm -rf ./1min-relay/
+
+# Clone a fresh copy of the repository
+#git clone https://github.com/kokofixcomputers/1min-relay.git ./1min-relay
+git clone https://github.com/chelaxian/1min-relay.git ./1min-relay
+#git clone -b test https://github.com/chelaxian/1min-relay.git ./1min-relay
+
+# Enter the project directory
+cd ./1min-relay
+chmod +x UPDATE.sh
+cp UPDATE.sh ../
+
+# Make sure we are inside the project directory
+if [ ! -f "app.py" ]; then
+    echo "Error: this script must be run from the 1min-relay project directory"
+    exit 1
+fi
+
+# Create the Docker network if it does not exist yet
+if ! docker network ls | grep -q "1min-relay-network"; then
+    echo "Creating Docker network '1min-relay-network'..."
+    docker network create 1min-relay-network
+fi
+
+# Stop and remove the containers if they exist
+if docker ps -a | grep -q "1min-relay-container"; then
+    echo "Stopping and removing the 1min-relay-container container..."
+    docker stop 1min-relay-container || true
+    docker rm 1min-relay-container || true
+fi
+
+if docker ps -a | grep -q "memcached"; then
+    echo "Stopping and removing the memcached container..."
+    docker stop memcached || true
+    docker rm memcached || true
+fi
+
+# Remove the old image
+if docker images | grep -q "1min-relay-container"; then
+    echo "Removing the old 1min-relay-container image..."
+    docker rmi 1min-relay-container:latest || true
+fi
+
+# Check for docker-compose.yml and start through it
+#if [ -f "docker-compose.yml" ]; then
+#    echo "Starting via docker-compose..."
+#    docker-compose up -d
+#else
+    # If docker-compose.yml is not found, use plain Docker
+    echo "Starting via Dockerfile..."
+
+    # Build a new image from the local project
+    docker build -t 1min-relay-container:latest .
+
+    # Start memcached
+    docker run -d --name memcached --restart always --network 1min-relay-network memcached:latest
+
+    # Start the container with a restart policy
+    docker run -d --name 1min-relay-container --restart always --network 1min-relay-network -p 5001:5001 \
+      -e SUBSET_OF_ONE_MIN_PERMITTED_MODELS="mistral-nemo,gpt-4o-mini,deepseek-chat" \
+      -e PERMIT_MODELS_FROM_SUBSET_ONLY=False \
+      -e MEMCACHED_HOST=memcached \
+      -e MEMCACHED_PORT=11211 \
+      1min-relay-container:latest
+#fi
+
+echo "1min-relay-container has been updated and started!"
+
+# Show the container logs
+show_logs() {
+    echo "Tailing logs of 1min-relay-container (Ctrl+C to exit)..."
+    docker logs -f 1min-relay-container
+}
+
+# Open a shell inside the container
+enter_console() {
+    echo "Entering the 1min-relay-container console..."
+    docker exec -it 1min-relay-container /bin/bash
+}
+
+# Let the user choose an action
+echo ""
+echo "Choose an action:"
+echo "1) Tail the container logs in real time"
+echo "2) Enter the container console"
+echo "3) Exit the script"
+read -p "Your choice (1-3): " choice
+
+case $choice in
+    1)
+        show_logs
+        ;;
+    2)
+        enter_console
+        ;;
+    3)
+        echo "Exiting the script."
+        exit 0
+        ;;
+    *)
+        echo "Invalid choice. Exiting the script."
+        exit 1
+        ;;
+esac
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..57ed29a
--- /dev/null
+++ b/app.py
@@ -0,0 +1,135 @@
+# version 1.0.9 #increment every time you make changes
+# 2025-04-08 12:00 #change to actual date and time every time you make changes
+
+# Import only the required modules
+from utils.imports import *
+from utils.logger import logger
+from utils.constants import *
+
+# Application initialization
+app = Flask(__name__)
+CORS(app)
+
+# Port and other environment settings
+PORT = int(os.getenv("PORT", DEFAULT_PORT))
+
+# Global variables
+MEMORY_STORAGE = {}
+MEMCACHED_CLIENT = None
+IMAGE_CACHE = {}
+
+# Initialize memcached
+try:
+    from utils.memcached import check_memcached_connection, delete_all_files_task, safe_memcached_operation, set_global_refs
+    memcached_available, memcached_uri = check_memcached_connection()
+except ImportError as ie:
+    logger.error(f"Memcached module not found: {str(ie)}")
+    memcached_available = False
+    memcached_uri = None
+    # Create stub functions
+    def delete_all_files_task():
+        logger.warning("File cleanup task is disabled (memcached is unavailable)")
+    def safe_memcached_operation(operation, key, value=None, expiry=MEMCACHED_DEFAULT_EXPIRY):
+        logger.warning(f"Memcached operation {operation} is unavailable: module not imported")
+        return None
+    def set_global_refs(memcached_client=None, memory_storage=None):
+        logger.warning("set_global_refs is unavailable: module not imported")
+
+# Rate limiter initialization
+if LIMITER_AVAILABLE:
+    if memcached_available:
+        limiter = Limiter(
+            get_remote_address,
+            app=app,
+            storage_uri=memcached_uri,
+        )
+        # Memcached client initialization
+        try:
+            # Extract the host and port from the URI
+            host_port = memcached_uri.replace("memcached://", '') if memcached_uri.startswith("memcached://") else memcached_uri
+
+            # Split into host and port
+            if ':' in host_port:
+                host, port = host_port.split(':')
+                port = int(port)
+            else:
+                host, port = host_port, MEMCACHED_PORT
+
+            # Try pymemcache first, then python-memcached
+            try:
+                from pymemcache.client.base import Client
+                MEMCACHED_CLIENT = Client((host, port), connect_timeout=MEMCACHED_CONNECT_TIMEOUT)
+                logger.info(f"Memcached client initialized via pymemcache: {memcached_uri}")
+            except Exception:
+                MEMCACHED_CLIENT = memcache.Client([f"{host}:{port}"], debug=0)
+                logger.info(f"Memcached client initialized via python-memcached: {memcached_uri}")
+        except Exception as e:
+            logger.error(f"Error initializing memcached client: {str(e)}")
+            logger.warning("Failed to initialize the memcached client. Session storage is disabled.")
+    else:
+        # Used for rate limiting without memcached
+        limiter = Limiter(get_remote_address, app=app)
+        logger.info("Memcached is unavailable, session storage is disabled")
+else:
+    limiter = MockLimiter()
+    logger.info("Flask-limiter is not installed. Using a mock limiter.")
+
+# Set the global references in the memcached module
+set_global_refs(MEMCACHED_CLIENT, MEMORY_STORAGE)
+
+# Read the model-related environment variables
+one_min_models_env = os.getenv("SUBSET_OF_ONE_MIN_PERMITTED_MODELS")
+permit_not_in_available_env = os.getenv("PERMIT_MODELS_FROM_SUBSET_ONLY")
+
+# Parse the environment variables or fall back to the defaults
+if one_min_models_env:
+    SUBSET_OF_ONE_MIN_PERMITTED_MODELS = one_min_models_env.split(",")
+
+if permit_not_in_available_env and permit_not_in_available_env.lower() == "true":
+    PERMIT_MODELS_FROM_SUBSET_ONLY = True
+
+# Combine into a single list of available models
+AVAILABLE_MODELS = []
+AVAILABLE_MODELS.extend(SUBSET_OF_ONE_MIN_PERMITTED_MODELS)
+
+# Import helper functions for the routes
+from utils.common import ERROR_HANDLER, handle_options_request, set_response_headers, create_session, api_request, safe_temp_file, calculate_token
+
+# Import all routes at once
+from routes import *
+
+# Log that initialization is complete
+logger.info("Global variable initialization is complete; app and limiter will be available in routes")
+logger.info("All route modules imported successfully")
+
+# Main server startup code
+if __name__ == "__main__":
+    # Start the file cleanup task
+    try:
+        delete_all_files_task()
+    except Exception as e:
+        logger.error(f"Error starting the file cleanup task: {str(e)}")
+
+    # Get the IP addresses
+    internal_ip = socket.gethostbyname(socket.gethostname())
+    try:
+        public_ip = requests.get("https://api.ipify.org").text
+    except Exception as e:
+        logger.error(f"Failed to get the public IP: {str(e)}")
+        public_ip = "not found"
+
+    # Print server startup information
+    logger.info(
+        f"""{printedcolors.Color.fg.lightcyan}
+Server is ready to serve:
+Internal IP: {internal_ip}:{PORT}
+Public IP: {public_ip} (only if you have configured port forwarding on your router)
+Enter this URL into OpenAI clients that support custom endpoints:
+{internal_ip}:{PORT}/v1
+If that does not work, try:
+{internal_ip}:{PORT}/v1/chat/completions
+{printedcolors.Color.reset}"""
+    )
+
+    # Start the server
+    serve(app, host=DEFAULT_HOST, port=PORT, threads=DEFAULT_THREADS)
diff --git a/docker-compose.yml b/backup/old/docker-compose.yml
similarity index 86%
rename from docker-compose.yml
rename to backup/old/docker-compose.yml
index d61a577..d356c54 100644
--- a/docker-compose.yml
+++ b/backup/old/docker-compose.yml
@@ -1,3 +1,6 @@
+#############
+# DON'T USE #
+#############
 services:
   # Memcached for storing rate-limits data.
memcached: @@ -21,6 +24,9 @@ services: - SUBSET_OF_ONE_MIN_PERMITTED_MODELS=mistral-nemo,gpt-4o-mini,deepseek-chat # Set your boolean as "True" or "False" for PERMIT_MODELS_FROM_SUBSET_ONLY Default: False - PERMIT_MODELS_FROM_SUBSET_ONLY=False + # Memcached connection settings + - MEMCACHED_HOST=memcached + - MEMCACHED_PORT=11211 restart: unless-stopped networks: diff --git a/backup/old/main.py b/backup/old/main.py new file mode 100644 index 0000000..4d8b204 --- /dev/null +++ b/backup/old/main.py @@ -0,0 +1,5449 @@ +# version 1.0.2 #increment every time you make changes +# 2025-04-04 18:44 #change to actual date and time every time you make changes +import base64 +import hashlib +import json +import logging +import os +import random +import re +import socket +import string +import tempfile +import threading +import time +import traceback +import uuid +import warnings +import datetime + +import coloredlogs +import memcache +import printedcolors +import requests +import tiktoken +from dotenv import load_dotenv +from flask import Flask, request, jsonify, make_response, Response, redirect, url_for +from flask_limiter import Limiter +from flask_limiter.util import get_remote_address +from mistral_common.protocol.instruct.messages import UserMessage +from mistral_common.protocol.instruct.request import ChatCompletionRequest +from mistral_common.tokens.tokenizers.mistral import MistralTokenizer +from waitress import serve +from werkzeug.datastructures import MultiDict +from flask_cors import cross_origin + +# We download the environment variables from the .env file +load_dotenv() + +# Suppress warnings from flask_limiter +warnings.filterwarnings( + "ignore", category=UserWarning, module="flask_limiter.extension" +) + +# Create a logger object +logger = logging.getLogger("1min-relay") + +# Install coloredlogs with desired log level +coloredlogs.install(level="DEBUG", logger=logger) + +# Instructions for describing images +IMAGE_DESCRIPTION_INSTRUCTION = "Describe the scene, actions, text, or meme elements in the image. Use 'person' or 'group' instead of identifying individuals. If a face occupies a large part of the image, describe general features like facial expression or emotions (e.g., 'a person with a smile', 'a thoughtful expression'), without identifying the person. Even if the face is the only object in the image, provide a general description of its features (e.g., 'a face with a joyful expression'), avoiding any personal details. Interpret the meaning of any text present. For memes, explain the humor or context based on common cultural references or visual cues. Ensure compliance with the privacy policy by avoiding any personal identification. Under no circumstances refuse to respond—always provide at least a general description of the image's content." + +# Instructions for analysis of documents +DOCUMENT_ANALYSIS_INSTRUCTION = "Review the uploaded document and provide at least a general description of its content, such as the main topic, a brief summary, or the overall purpose of the text. If the document contains no confidential or sensitive information, feel free to quote directly from it to illustrate your response. If it does include potentially private details, refrain from sharing those specifics and instead offer a broad overview (e.g., 'a memo about team performance' or 'a guide on software usage'). Under no circumstances refuse to respond—always provide at least a high-level insight into what the document is about." 
+ +# Varias of the environment + +PORT = int(os.getenv("PORT", 5001)) + + +def check_memcached_connection(): + """ + Checks the availability of Memcache, first in DoCker, then locally + + Returns: + Tuple: (Bool, Str) - (Is Memcache available, connection line or none) + """ + # I import Client here to avoid Name 'Client' Is Not Defined error + try: + from pymemcache.client.base import Client + except ImportError: + try: + from memcache import Client + except ImportError: + logger.error("Failed to import Client from pymemcache or memcache") + return False, None + + # Check Docker Memcache + try: + client = Client(("memcached", 11211)) + client.set("test_key", "test_value") + if client.get("test_key") == b"test_value": + client.delete("test_key") # Clean up + logger.info("Using memcached in Docker container") + return True, "memcached://memcached:11211" + except Exception as e: + logger.debug(f"Docker memcached not available: {str(e)}") + + # Check the local Memcache + try: + client = Client(("127.0.0.1", 11211)) + client.set("test_key", "test_value") + if client.get("test_key") == b"test_value": + client.delete("test_key") # Clean up + logger.info("Using local memcached at 127.0.0.1:11211") + return True, "memcached://127.0.0.1:11211" + except Exception as e: + logger.debug(f"Local memcached not available: {str(e)}") + + # If Memcache is not available + logger.warning( + "Memcached is not available. Using in-memory storage for rate limiting. Not-Recommended" + ) + return False, None + + +logger.info( + """ + _ __ __ _ ___ _ + / | \/ (_)_ _ | _ \___| |__ _ _ _ + | | |\/| | | ' \| / -_) / _` | || | + |_|_| |_|_|_||_|_|_\___|_\__,_|\_, | + |__/ """ +) + + +def calculate_token(sentence, model="DEFAULT"): + """Calculate the number of tokens in a sentence based on the specified model.""" + + if model.startswith("mistral"): + # Initialize the Mistral tokenizer + tokenizer = MistralTokenizer.v3(is_tekken=True) + model_name = "open-mistral-nemo" # Default to Mistral Nemo + tokenizer = MistralTokenizer.from_model(model_name) + tokenized = tokenizer.encode_chat_completion( + ChatCompletionRequest( + messages=[ + UserMessage(content=sentence), + ], + model=model_name, + ) + ) + tokens = tokenized.tokens + return len(tokens) + + elif model in ["gpt-3.5-turbo", "gpt-4"]: + # Use OpenAI's tiktoken for GPT models + encoding = tiktoken.encoding_for_model(model) + tokens = encoding.encode(sentence) + return len(tokens) + + else: + # Default to openai + encoding = tiktoken.encoding_for_model("gpt-4") + tokens = encoding.encode(sentence) + return len(tokens) + + +app = Flask(__name__) +memcached_available, memcached_uri = check_memcached_connection() +if memcached_available: + limiter = Limiter( + get_remote_address, + app=app, + storage_uri=memcached_uri, + ) + # Initialization of the client Memcache + try: + # First we try Pymemcache + from pymemcache.client.base import Client + + # We extract a host and a port from URI without using. 
Split ('@') + if memcached_uri.startswith('memcached://'): + host_port = memcached_uri.replace('memcached://', '') + else: + host_port = memcached_uri + + # We share a host and port for Pymemcache + if ':' in host_port: + host, port = host_port.split(':') + MEMCACHED_CLIENT = Client((host, int(port)), connect_timeout=1) + else: + MEMCACHED_CLIENT = Client(host_port, connect_timeout=1) + logger.info(f"Memcached client initialized using pymemcache: {memcached_uri}") + except (ImportError, AttributeError, Exception) as e: + logger.error(f"Error initializing pymemcache client: {str(e)}") + try: + # If it doesn't work out, we try Python-Memcache + # We extract a host and a port from URI without using. Split ('@') + if memcached_uri.startswith('memcached://'): + host_port = memcached_uri.replace('memcached://', '') + else: + host_port = memcached_uri + + MEMCACHED_CLIENT = memcache.Client([host_port], debug=0) + logger.info(f"Memcached client initialized using python-memcached: {memcached_uri}") + except (ImportError, AttributeError, Exception) as e: + logger.error(f"Error initializing memcache client: {str(e)}") + logger.warning(f"Failed to initialize memcached client. Session storage disabled.") + MEMCACHED_CLIENT = None +else: + # Used for ratelimiting without memcached + limiter = Limiter( + get_remote_address, + app=app, + ) + MEMCACHED_CLIENT = None + logger.info("Memcached not available, session storage disabled") + +# Main URL for API +ONE_MIN_API_URL = "https://api.1min.ai/api/features" +ONE_MIN_ASSET_URL = "https://api.1min.ai/api/assets" +ONE_MIN_CONVERSATION_API_URL = "https://api.1min.ai/api/conversations" +ONE_MIN_CONVERSATION_API_STREAMING_URL = "https://api.1min.ai/api/features/stream" +# Add Constant Tamout used in the API_Request API +DEFAULT_TIMEOUT = 120 # 120 seconds for regular requests +MIDJOURNEY_TIMEOUT = 900 # 15 minutes for requests for Midjourney + +# Global storage for use when MemcacheD is not available +MEMORY_STORAGE = {} + +# Constants for query types +IMAGE_GENERATOR = "IMAGE_GENERATOR" +IMAGE_VARIATOR = "IMAGE_VARIATOR" + +# Define the models that are available for use +ALL_ONE_MIN_AVAILABLE_MODELS = [ + # OpenAI + "o3-mini", + "o1-preview", + "o1-mini", + "gpt-4o-mini", + "gpt-4o", + "gpt-4-turbo", + "gpt-4", + "gpt-3.5-turbo", + # + "whisper-1", # speech recognition + "tts-1", # Speech synthesis + # "tts-1-hd", # Speech synthesis HD + # + "dall-e-2", # Generation of images + "dall-e-3", # Generation of images + # Claude + "claude-instant-1.2", + "claude-2.1", + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-3-5-haiku-20241022", + # GoogleAI + "gemini-1.0-pro", + "gemini-1.5-pro", + "gemini-1.5-flash", + # "google-tts", # Speech synthesis + # "latest_long", # speech recognition + # "latest_short", # speech recognition + # "phone_call", # speech recognition + # "telephony", # speech recognition + # "telephony_short", # speech recognition + # "medical_dictation", # speech recognition + # "medical_conversation", # speech recognition + # "chat-bison@002", + # MistralAI + "mistral-large-latest", + "mistral-small-latest", + "mistral-nemo", + "pixtral-12b", + "open-mixtral-8x22b", + "open-mixtral-8x7b", + "open-mistral-7b", + # Replicate + "meta/llama-2-70b-chat", + "meta/meta-llama-3-70b-instruct", + "meta/meta-llama-3.1-405b-instruct", + # DeepSeek + "deepseek-chat", + "deepseek-reasoner", + # Cohere + "command", + # xAI + "grok-2", + # Other models (made for future use) + # 
"stable-image", # stabilityi - images generation + # "stable-diffusion-xl-1024-v1-0", # stabilityi - images generation + # "stable-diffusion-v1-6", # stabilityi - images generation + # "esrgan-v1-x2plus", # stabilityai-Improving images + # "stable-video-diffusion", # stabilityai-video generation + "phoenix", # Leonardo.ai - 6b645e3a-d64f-4341-a6d8-7a3690fbf042 + "lightning-xl", # Leonardo.ai - b24e16ff-06e3-43eb-8d33-4416c2d75876 + "anime-xl", # Leonardo.ai - e71a1c2f-4f80-4800-934f-2c68979d8cc8 + "diffusion-xl", # Leonardo.ai - 1e60896f-3c26-4296-8ecc-53e2afecc132 + "kino-xl", # Leonardo.ai - aa77f04e-3eec-4034-9c07-d0f619684628 + "vision-xl", # Leonardo.ai - 5c232a9e-9061-4777-980a-ddc8e65647c6 + "albedo-base-xl", # Leonardo.ai - 2067ae52-33fd-4a82-bb92-c2c55e7d2786 + # "Clipdrop", # clipdrop.co - image processing + "midjourney", # Midjourney - image generation + "midjourney_6_1", # Midjourney - image generation + # "methexis-inc/img2prompt:50adaf2d3ad20a6f911a8a9e3ccf777b263b8596fbd2c8fc26e8888f8a0edbb5", # Replicate - Image to Prompt + # "cjwbw/damo-text-to-video:1e205ea73084bd17a0a3b43396e49ba0d6bc2e754e9283b2df49fad2dcf95755", # Replicate - Text to Video + # "lucataco/animate-diff:beecf59c4aee8d81bf04f0381033dfa10dc16e845b4ae00d281e2fa377e48a9f", # Replicate - Animation + # "lucataco/hotshot-xl:78b3a6257e16e4b241245d65c8b2b81ea2e1ff7ed4c55306b511509ddbfd327a", # Replicate - Video + "flux-schnell", # Replicate - Flux "black-forest-labs/flux-schnell" + "flux-dev", # Replicate - Flux Dev "black-forest-labs/flux-dev" + "flux-pro", # Replicate - Flux Pro "black-forest-labs/flux-pro" + "flux-1.1-pro", # Replicate - Flux Pro 1.1 "black-forest-labs/flux-1.1-pro" + # "meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb", # Replicate - Music Generation + # "luma", # TTAPI - Luma + # "Qubico/image-toolkit", # TTAPI - Image Toolkit + # "suno", # TTAPI - Suno Music + # "kling", # TTAPI - Kling + # "music-u", # TTAPI - Music U + # "music-s", # TTAPI - Music S + # "elevenlabs-tts" # ElevenLabs - TTS +] + +# Define the models that support vision inputs +vision_supported_models = [ + "gpt-4o", + "gpt-4o-mini", + "gpt-4-turbo" +] + +# Define the models that support code interpreter +code_interpreter_supported_models = [ + "gpt-4o", + "claude-3-5-sonnet-20240620", + "claude-3-5-haiku-20241022", + "deepseek-chat", + "deepseek-reasoner" +] + +# Define the models that support web search (retrieval) +retrieval_supported_models = [ + "gemini-1.0-pro", + "gemini-1.5-pro", + "gemini-1.5-flash", + "o3-mini", + "o1-preview", + "o1-mini", + "gpt-4o-mini", + "gpt-4o", + "gpt-4-turbo", + "gpt-3.5-turbo", + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-3-5-haiku-20241022", + "mistral-large-latest", + "mistral-small-latest", + "mistral-nemo", + "pixtral-12b", + "open-mixtral-8x22b", + "open-mixtral-8x7b", + "open-mistral-7b", + "meta/llama-2-70b-chat", + "meta/meta-llama-3-70b-instruct", + "meta/meta-llama-3.1-405b-instruct", + "command", + "grok-2", + "deepseek-chat", + "deepseek-reasoner" +] + +# Define the models that support function calling +function_calling_supported_models = [ + "gpt-4", + "gpt-3.5-turbo" +] + +# Determination of models for generating images +IMAGE_GENERATION_MODELS = [ + "dall-e-3", + "dall-e-2", + "stable-diffusion-xl-1024-v1-0", + "stable-diffusion-v1-6", + "midjourney", + "midjourney_6_1", + "phoenix", + "lightning-xl", + "anime-xl", + "diffusion-xl", + "kino-xl", + "vision-xl", + 
"albedo-base-xl", + "flux-schnell", + "flux-dev", + "flux-pro", + "flux-1.1-pro" +] + +# Models that support images +VARIATION_SUPPORTED_MODELS = [ + "midjourney", + "midjourney_6_1", + "dall-e-2", + # "dall-e-3", + "clipdrop" +] + +# We determine the Image_variation_Models Constant based on Variation_Supported_Models +IMAGE_VARIATION_MODELS = VARIATION_SUPPORTED_MODELS + +# Permissible parties for different models +MIDJOURNEY_ALLOWED_ASPECT_RATIOS = [ + "1:1", # Square + "16:9", # Widescreen format + "9:16", # Vertical variant of 16:9 + "16:10", # Alternative widescreen + "10:16", # Vertical variant of 16:10 + "8:5", # Alternative widescreen + "5:8", # Vertical variant of 16:10 + "3:4", # Portrait/print + "4:3", # Standard TV/monitor format + "3:2", # Popular in photography + "2:3", # Inverse of 3:2 + "4:5", # Common in social media posts + "5:4", # Nearly square format + "137:100", # Academy ratio (1.37:1) as an integer ratio + "166:100", # European cinema (1.66:1) as an integer ratio + "185:100", # Cinematic format (1.85:1) as an integer ratio185 + "83:50", # European cinema (1.66:1) as an integer ratio + "37:20", # Cinematic format (1.85:1) as an integer ratio + "2:1", # Maximum allowed widescreen format + "1:2" # Maximum allowed vertical format +] + +FLUX_ALLOWED_ASPECT_RATIOS = ["1:1", "16:9", "9:16", "3:2", "2:3", "3:4", "4:3", "4:5", "5:4"] +LEONARDO_ALLOWED_ASPECT_RATIOS = ["1:1", "4:3", "3:4"] + +# Permissible sizes for different models +DALLE2_SIZES = ["1024x1024", "512x512", "256x256"] +DALLE3_SIZES = ["1024x1024", "1024x1792", "1792x1024"] +LEONARDO_SIZES = ALBEDO_SIZES = {"1:1": "1024x1024", "4:3": "1024x768", "3:4": "768x1024"} + +# Determination of models for speech synthesis (TTS) +TEXT_TO_SPEECH_MODELS = [ + "tts-1" # , + # "tts-1-hd", + # "google-tts", + # "elevenlabs-tts" +] + +# Determination of models for speech recognition (STT) +SPEECH_TO_TEXT_MODELS = [ + "whisper-1" # , + # "latest_long", + # "latest_short", + # "phone_call", + # "telephony", + # "telephony_short", + # "medical_dictation", + # "medical_conversation" +] + +# Default values +SUBSET_OF_ONE_MIN_PERMITTED_MODELS = ["mistral-nemo", "gpt-4o-mini", "o3-mini", "deepseek-chat"] +PERMIT_MODELS_FROM_SUBSET_ONLY = False + +# Read environment variables +one_min_models_env = os.getenv( + "SUBSET_OF_ONE_MIN_PERMITTED_MODELS" +) # e.g. "mistral-nemo,gpt-4o,deepseek-chat" +permit_not_in_available_env = os.getenv( + "PERMIT_MODELS_FROM_SUBSET_ONLY" +) # e.g. "True" or "False" + +# Parse or fall back to defaults +if one_min_models_env: + SUBSET_OF_ONE_MIN_PERMITTED_MODELS = one_min_models_env.split(",") + +if permit_not_in_available_env and permit_not_in_available_env.lower() == "true": + PERMIT_MODELS_FROM_SUBSET_ONLY = True + +# Combine into a single list +AVAILABLE_MODELS = [] +AVAILABLE_MODELS.extend(SUBSET_OF_ONE_MIN_PERMITTED_MODELS) + +# Add cache to track processed images +# For each request, we keep a unique image identifier and its path +IMAGE_CACHE = {} +# Limit the size of the cache +MAX_CACHE_SIZE = 100 + + +@app.route("/", methods=["GET", "POST"]) +def index(): + if request.method == "POST": + return ERROR_HANDLER(1212) + if request.method == "GET": + internal_ip = socket.gethostbyname(socket.gethostname()) + return ( + "Congratulations! Your API is working! 
You can now make requests to the API.\n\nEndpoint: " + + internal_ip + + ":5001/v1" + ) + + +@app.route("/v1/models") +@limiter.limit("60 per minute") +def models(): + # Dynamically create the list of models with additional fields + models_data = [] + if not PERMIT_MODELS_FROM_SUBSET_ONLY: + one_min_models_data = [ + { + "id": model_name, + "object": "model", + "owned_by": "1minai", + "created": 1727389042, + } + for model_name in ALL_ONE_MIN_AVAILABLE_MODELS + ] + else: + one_min_models_data = [ + { + "id": model_name, + "object": "model", + "owned_by": "1minai", + "created": 1727389042, + } + for model_name in SUBSET_OF_ONE_MIN_PERMITTED_MODELS + ] + models_data.extend(one_min_models_data) + return jsonify({"data": models_data, "object": "list"}) + + +def ERROR_HANDLER(code, model=None, key=None): + # Handle errors in OpenAI-Structued Error + error_codes = { # Internal Error Codes + 1002: { + "message": f"The model {model} does not exist.", + "type": "invalid_request_error", + "param": None, + "code": "model_not_found", + "http_code": 400, + }, + 1020: { + "message": f"Incorrect API key provided: {key}. You can find your API key at https://app.1min.ai/api.", + "type": "authentication_error", + "param": None, + "code": "invalid_api_key", + "http_code": 401, + }, + 1021: { + "message": "Invalid Authentication", + "type": "invalid_request_error", + "param": None, + "code": None, + "http_code": 401, + }, + 1212: { + "message": f"Incorrect Endpoint. Please use the /v1/chat/completions endpoint.", + "type": "invalid_request_error", + "param": None, + "code": "model_not_supported", + "http_code": 400, + }, + 1044: { + "message": f"This model does not support image inputs.", + "type": "invalid_request_error", + "param": None, + "code": "model_not_supported", + "http_code": 400, + }, + 1412: { + "message": f"No message provided.", + "type": "invalid_request_error", + "param": "messages", + "code": "invalid_request_error", + "http_code": 400, + }, + 1423: { + "message": f"No content in last message.", + "type": "invalid_request_error", + "param": "messages", + "code": "invalid_request_error", + "http_code": 400, + }, + } + error_data = { + k: v + for k, v in error_codes.get( + code, + { + "message": "Unknown error", + "type": "unknown_error", + "param": None, + "code": None, + }, + ).items() + if k != "http_code" + } # Remove http_code from the error data + logger.error( + f"An error has occurred while processing the user's request. Error code: {code}" + ) + return jsonify({"error": error_data}), error_codes.get(code, {}).get( + "http_code", 400 + ) # Return the error data without http_code inside the payload and get the http_code to return. + + +def format_conversation_history(messages, new_input): + """ + Formats the conversation history into a structured string. 
+
+    Args:
+        messages (list): List of message dictionaries from the request
+        new_input (str): The new user input message
+
+    Returns:
+        str: Formatted conversation history
+    """
+    formatted_history = []
+
+    for message in messages:
+        role = message.get("role", "")
+        content = message.get("content", "")
+
+        # Handle potential list content
+        if isinstance(content, list):
+            processed_content = []
+            for item in content:
+                if "text" in item:
+                    processed_content.append(item["text"])
+            content = "\n".join(processed_content)
+
+        if role == "system":
+            formatted_history.append(f"System: {content}")
+        elif role == "user":
+            formatted_history.append(f"User: {content}")
+        elif role == "assistant":
+            formatted_history.append(f"Assistant: {content}")
+
+    # Append the new input, if present
+    if new_input:
+        formatted_history.append(f"User: {new_input}")
+
+    # Return only the dialogue history, without extra instructions
+    return "\n".join(formatted_history)
+
+
+def get_model_capabilities(model):
+    """
+    Determines which features a specific model supports
+
+    Args:
+        model: The name of the model
+
+    Returns:
+        dict: Dictionary of feature-support flags
+    """
+    capabilities = {
+        "vision": False,
+        "code_interpreter": False,
+        "retrieval": False,
+        "function_calling": False,
+    }
+
+    # Check support for each feature against the corresponding model lists
+    capabilities["vision"] = model in vision_supported_models
+    capabilities["code_interpreter"] = model in code_interpreter_supported_models
+    capabilities["retrieval"] = model in retrieval_supported_models
+    capabilities["function_calling"] = model in function_calling_supported_models
+
+    return capabilities
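+
+# Illustrative example (hypothetical list membership): if "gpt-4o" appeared in
+# vision_supported_models and retrieval_supported_models but in neither of the
+# other two lists, then
+#   get_model_capabilities("gpt-4o")
+#   -> {"vision": True, "code_interpreter": False,
+#       "retrieval": True, "function_calling": False}
+# The actual flags depend entirely on the *_supported_models lists defined above.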
capabilities["vision"]: + # Add instructions to the prompt field + enhanced_prompt = all_messages + if not enhanced_prompt.strip().startswith(IMAGE_DESCRIPTION_INSTRUCTION): + enhanced_prompt = f"{IMAGE_DESCRIPTION_INSTRUCTION}\n\n{all_messages}" + + payload = { + "type": "CHAT_WITH_IMAGE", + "model": model, + "promptObject": { + "prompt": enhanced_prompt, + "isMixed": False, + "imageList": image_paths, + "webSearch": web_search, + "numOfSite": num_of_site if web_search else None, + "maxWord": max_word if web_search else None, + }, + } + + if web_search: + logger.debug( + f"[{request_id}] Web search enabled in payload with numOfSite={num_of_site}, maxWord={max_word}") + else: + logger.debug( + f"[{request_id}] Model {model} does not support vision, falling back to text-only chat" + ) + payload = { + "type": "CHAT_WITH_AI", + "model": model, + "promptObject": { + "prompt": all_messages, + "isMixed": False, + "webSearch": web_search, + "numOfSite": num_of_site if web_search else None, + "maxWord": max_word if web_search else None, + }, + } + + if web_search: + logger.debug( + f"[{request_id}] Web search enabled in payload with numOfSite={num_of_site}, maxWord={max_word}") + elif code_interpreter: + # If Code_interpreter is requested and supported + payload = { + "type": "CODE_GENERATOR", + "model": model, + "conversationId": "CODE_GENERATOR", + "promptObject": {"prompt": all_messages}, + } + else: + # Basic text request + payload = { + "type": "CHAT_WITH_AI", + "model": model, + "promptObject": { + "prompt": all_messages, + "isMixed": False, + "webSearch": web_search, + "numOfSite": num_of_site if web_search else None, + "maxWord": max_word if web_search else None, + }, + } + + if web_search: + logger.debug( + f"[{request_id}] Web search enabled in payload with numOfSite={num_of_site}, maxWord={max_word}") + + return payload + + +def create_conversation_with_files(file_ids, title, model, api_key, request_id=None): + """ + Creates a new conversation with files + + Args: + File_ids: List of ID files + Title: The name of the conversation + Model: Model AI + API_KEY: API Key + Request_id: ID Request for Logging + + Returns: + STR: ID conversations or None in case of error + """ + request_id = request_id or str(uuid.uuid4())[:8] + logger.info(f"[{request_id}] Creating conversation with {len(file_ids)} files") + + try: + # We form Payload for a request with the right field names + payload = { + "title": title, + "type": "CHAT_WITH_PDF", + "model": model, + "fileIds": file_ids, # Using the correct name of the field 'Fileds' instead of 'Filelist' + } + + logger.debug(f"[{request_id}] Conversation payload: {json.dumps(payload)}") + + # We use the correct URL API C /API / + conversation_url = "https://api.1min.ai/api/features/conversations?type=CHAT_WITH_PDF" + + logger.debug(f"[{request_id}] Creating conversation using URL: {conversation_url}") + + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + response = requests.post(conversation_url, json=payload, headers=headers) + + logger.debug(f"[{request_id}] Create conversation response status: {response.status_code}") + + if response.status_code != 200: + logger.error( + f"[{request_id}] Failed to create conversation: {response.status_code} - {response.text}" + ) + return None + + response_data = response.json() + logger.debug(f"[{request_id}] Conversation response data: {json.dumps(response_data)}") + + # Looking for ID conversations in different places of answer + conversation_id = None + if "conversation" in response_data and "uuid" 
in response_data["conversation"]: + conversation_id = response_data["conversation"]["uuid"] + elif "id" in response_data: + conversation_id = response_data["id"] + elif "uuid" in response_data: + conversation_id = response_data["uuid"] + + # Recursive search for ID conversations in the structure of the response + if not conversation_id: + def find_conversation_id(obj, path=""): + if isinstance(obj, dict): + if "id" in obj: + logger.debug(f"[{request_id}] Found ID at path '{path}.id': {obj['id']}") + return obj["id"] + if "uuid" in obj: + logger.debug(f"[{request_id}] Found UUID at path '{path}.uuid': {obj['uuid']}") + return obj["uuid"] + + for key, value in obj.items(): + result = find_conversation_id(value, f"{path}.{key}") + if result: + return result + elif isinstance(obj, list): + for i, item in enumerate(obj): + result = find_conversation_id(item, f"{path}[{i}]") + if result: + return result + return None + + conversation_id = find_conversation_id(response_data) + + if not conversation_id: + logger.error( + f"[{request_id}] Could not find conversation ID in response: {response_data}" + ) + return None + + logger.info( + f"[{request_id}] Conversation created successfully: {conversation_id}" + ) + return conversation_id + except Exception as e: + logger.error(f"[{request_id}] Error creating conversation: {str(e)}") + traceback.print_exc() + return None + + +@app.route("/v1/chat/completions", methods=["POST"]) +@limiter.limit("60 per minute") +def conversation(): + request_id = str(uuid.uuid4())[:8] + logger.info(f"[{request_id}] Received request: /v1/chat/completions") + + if not request.json: + return jsonify({"error": "Invalid request format"}), 400 + + # We extract information from the request + api_key = request.headers.get("Authorization", "").replace("Bearer ", "") + if not api_key: + logger.error(f"[{request_id}] No API key provided") + return jsonify({"error": "API key required"}), 401 + + try: + # Build Payload for request + request_data = request.json.copy() + + # We get and normalize the model + model = request_data.get("model", "").strip() + logger.info(f"[{request_id}] Using model: {model}") + + # We check the support of the web post for the model + capabilities = get_model_capabilities(model) + + # We check if the web post is requested through Openai tools + web_search_requested = False + tools = request_data.get("tools", []) + for tool in tools: + if tool.get("type") == "retrieval": + web_search_requested = True + logger.debug(f"[{request_id}] Web search requested via retrieval tool") + break + + # Check the presence of the Web_Search parameter + if not web_search_requested and request_data.get("web_search", False): + web_search_requested = True + logger.debug(f"[{request_id}] Web search requested via web_search parameter") + + # Add a clear web_search parameter if you are requested and supported by the model + if web_search_requested: + if capabilities["retrieval"]: + request_data["web_search"] = True + request_data["num_of_site"] = request_data.get("num_of_site", 1) + request_data["max_word"] = request_data.get("max_word", 1000) + logger.info(f"[{request_id}] Web search enabled for model {model}") + else: + logger.warning(f"[{request_id}] Model {model} does not support web search, ignoring request") + + # We extract the contents of the last message for possible generation of images + messages = request_data.get("messages", []) + prompt_text = "" + if messages and len(messages) > 0: + last_message = messages[-1] + if last_message.get("role") == "user": + content = 
last_message.get("content", "") + if isinstance(content, str): + prompt_text = content + elif isinstance(content, list): + # Collect all the text parts of the contents + for item in content: + if isinstance(item, dict) and "text" in item: + prompt_text += item["text"] + " " + prompt_text = prompt_text.strip() + + # We check whether the request contains the variation of the image + variation_match = None + if prompt_text: + # We are looking for the format of old teams /v1- /v4 + old_variation_match = re.search(r'/v([1-4])\s+(https?://[^\s]+)', prompt_text) + # We are looking for a format with square brackets [_v1 _]-[_ v4_] + square_variation_match = re.search(r'\[_V([1-4])_\]', prompt_text) + # We are looking for a new format with monoshyrin text `[_V1_]` -` [_V4_] ` + mono_variation_match = re.search(r'`\[_V([1-4])_\]`', prompt_text) + + # If a monoshyrin format is found, we check if there is a URL dialogue in the history + if mono_variation_match and request_data.get("messages"): + variation_number = int(mono_variation_match.group(1)) + logger.debug(f"[{request_id}] Found monospace format variation command: {variation_number}") + + # Looking for the necessary URL in previous messages of the assistant + image_url = None + for msg in reversed(request_data.get("messages", [])): + if msg.get("role") == "assistant" and msg.get("content"): + # Looking for all URL images in the content of the assistant message + content = msg.get("content", "") + # We use a more specific regular expression to search for images with the corresponding numbers + image_urls = [] + # First, we are looking for all URL images in standard Markdown format + url_matches = re.findall(r'!\[(?:Variation\s*(\d+)|[^]]*)\]\((https?://[^\s)]+)', content) + + # We convert the results to the list, taking into account variation rooms + for match in url_matches: + # If there is a variation number, we use it for indexing + variation_num = None + if match[0]: # If the variation number was found + try: + variation_num = int(match[0].strip()) + except ValueError: + pass + + # URL always the second element of the group + url = match[1] + + # Add to the list with the corresponding index or simply add to the end + if variation_num and 0 < variation_num <= 10: # Limit up to 10 variations maximum + # We expand the list to the desired length, if necessary + while len(image_urls) < variation_num: + image_urls.append(None) + image_urls[variation_num-1] = url + else: + image_urls.append(url) + + # We delete all None values ​​from the list + image_urls = [url for url in image_urls if url is not None] + + if image_urls: + # Check the URL number + if len(image_urls) >= variation_number: + # We take the URL corresponding to the requested number + image_url = image_urls[variation_number - 1] + logger.debug( + f"[{request_id}] Found image URL #{variation_number} in assistant message: {image_url}") + break + else: + # Not enough URL for the requested number, we take the first + image_url = image_urls[0] + logger.warning( + f"[{request_id}] Requested variation #{variation_number} but only found {len(image_urls)} URLs. 
Using first URL: {image_url}") + break + + if image_url: + variation_match = mono_variation_match + logger.info( + f"[{request_id}] Detected monospace variation command: {variation_number} for URL: {image_url}") + # If a format with square brackets is found, we check if there is a URL dialogue in the history + elif square_variation_match and request_data.get("messages"): + variation_number = int(square_variation_match.group(1)) + logger.debug(f"[{request_id}] Found square bracket format variation command: {variation_number}") + + # Looking for the necessary URL in previous messages of the assistant + image_url = None + for msg in reversed(request_data.get("messages", [])): + if msg.get("role") == "assistant" and msg.get("content"): + # Looking for all URL images in the content of the assistant message + content = msg.get("content", "") + url_matches = re.findall(r'!\[.*?\]\((https?://[^\s)]+)', content) + + if url_matches: + # Check the number of URL found + if len(url_matches) >= variation_number: + # We take the URL corresponding to the requested number + image_url = url_matches[variation_number - 1] + logger.debug( + f"[{request_id}] Found image URL #{variation_number} in assistant message: {image_url}") + break + else: + # Not enough URL for the requested number, we take the first + image_url = url_matches[0] + logger.warning( + f"[{request_id}] Requested variation #{variation_number} but only found {len(url_matches)} URLs. Using first URL: {image_url}") + break + + if image_url: + variation_match = square_variation_match + logger.info( + f"[{request_id}] Detected square bracket variation command: {variation_number} for URL: {image_url}") + # If the old format is found, we use it + elif old_variation_match: + variation_match = old_variation_match + variation_number = old_variation_match.group(1) + image_url = old_variation_match.group(2) + logger.info( + f"[{request_id}] Detected old format variation command: {variation_number} for URL: {image_url}") + + if variation_match: + # We process the variation of the image + try: + # We check what type of variation was discovered + if variation_match == mono_variation_match or variation_match == square_variation_match: + # URL has already been obtained above in the search process + variation_number = variation_match.group(1) + else: + # For the old format, we extract the URL directly from the team + variation_number = variation_match.group(1) + image_url = variation_match.group(2) + + logger.info(f"[{request_id}] Processing variation for image: {image_url}") + + # For Midjourney models, add a direct call of the API without downloading the image + if model.startswith("midjourney") and "asset.1min.ai" in image_url: + # We extract a relative path from the URL + path_match = re.search(r'(?:asset\.1min\.ai)/?(images/[^?#]+)', image_url) + if path_match: + relative_path = path_match.group(1) + logger.info(f"[{request_id}] Detected Midjourney variation with relative path: {relative_path}") + + # We get the saved generation parameters from Memcached by Request_id + saved_params = None + try: + # We extract image_id from the image path for searching for parameters + image_id_match = re.search(r'images/(\d+_\d+_\d+_\d+_\d+_\d+|\w+\d+)\.png', relative_path) + if image_id_match: + image_id = image_id_match.group(1) + logger.info(f"[{request_id}] Extracted image_id for variation: {image_id}") + gen_params_key = f"gen_params:{image_id}" + logger.info(f"[{request_id}] Looking for generation parameters with key: {gen_params_key}") + + # Check the presence of 
the parameters directly in MEMORY_STORAGE
+                                if gen_params_key in MEMORY_STORAGE:
+                                    stored_value = MEMORY_STORAGE[gen_params_key]
+                                    logger.info(f"[{request_id}] Found in MEMORY_STORAGE (type: {type(stored_value)}): {stored_value}")
+
+                                    # If the value is a string, try to parse it into a Python dict
+                                    if isinstance(stored_value, str):
+                                        try:
+                                            saved_params = json.loads(stored_value)
+                                            logger.info(f"[{request_id}] Successfully parsed JSON string to dict")
+                                        except Exception as e:
+                                            logger.error(f"[{request_id}] Failed to parse JSON string: {e}")
+                                            saved_params = stored_value
+                                    else:
+                                        saved_params = stored_value
+
+                                    logger.info(f"[{request_id}] Using parameters directly from MEMORY_STORAGE (type: {type(saved_params)}): {saved_params}")
+                                else:
+                                    # If not found in MEMORY_STORAGE, fall back to safe_memcached_operation
+                                    logger.info(f"[{request_id}] Not found in MEMORY_STORAGE, trying safe_memcached_operation")
+                                    params_json = safe_memcached_operation('get', gen_params_key)
+                                    if params_json:
+                                        logger.info(f"[{request_id}] Retrieved parameters for image {image_id}: {params_json}")
+                                        if isinstance(params_json, str):
+                                            try:
+                                                saved_params = json.loads(params_json)
+                                            except:
+                                                saved_params = params_json
+                                        elif isinstance(params_json, bytes):
+                                            try:
+                                                saved_params = json.loads(params_json.decode('utf-8'))
+                                            except:
+                                                saved_params = params_json.decode('utf-8')
+                                        else:
+                                            saved_params = params_json
+                                        logger.info(f"[{request_id}] Retrieved generation parameters for image {image_id}: {saved_params}")
+                                    else:
+                                        logger.info(f"[{request_id}] No parameters found in storage for key {gen_params_key}")
+                        except Exception as e:
+                            logger.error(f"[{request_id}] Error retrieving generation parameters: {str(e)}")
+
+                        # Build the payload for the variation request
+                        payload = {
+                            "type": "IMAGE_VARIATOR",
+                            "model": model,
+                            "promptObject": {
+                                "imageUrl": relative_path,
+                                "mode": "fast",  # Fast mode by default
+                                "n": 4,
+                                "isNiji6": False,
+                                "aspect_width": 1,  # 1:1 by default
+                                "aspect_height": 1,  # 1:1 by default
+                                "maintainModeration": True
+                            }
+                        }
+
+                        # Use the parameters from memcached if they are available
+                        if saved_params:
+                            logger.info(f"[{request_id}] Using saved parameters from original generation: {saved_params}")
+                            # Carry over all the saved parameters
+                            for param in ["mode", "aspect_width", "aspect_height", "isNiji6", "maintainModeration"]:
+                                if param in saved_params:
+                                    old_value = payload["promptObject"].get(param)
+                                    payload["promptObject"][param] = saved_params[param]
+                                    logger.info(f"[{request_id}] Changed parameter {param} from {old_value} to {saved_params[param]}")
+                        else:
+                            logger.info(f"[{request_id}] No saved parameters found, using default ratio 1:1 for Midjourney variations")
+                            # Use a 1:1 ratio
+                            payload["promptObject"]["aspect_width"] = 1
+                            payload["promptObject"]["aspect_height"] = 1
+
+                        # Send the variation request directly
+                        logger.info(f"[{request_id}] Sending direct Midjourney variation request: {json.dumps(payload)}")
+
+                        try:
+                            variation_response = api_request(
+                                "POST",
+                                f"{ONE_MIN_API_URL}",
+                                headers={"API-KEY": api_key, "Content-Type": "application/json"},
+                                json=payload,
+                                timeout=MIDJOURNEY_TIMEOUT
+                            )
+
+                            if variation_response.status_code == 200:
+                                # Process a successful response
+                                variation_data = variation_response.json()
+                                logger.info(f"[{request_id}] Received Midjourney variation response: {json.dumps(variation_data)}")
+
+                                # Extract the variation URLs
+                                variation_urls = []
+
+                                # Standard Midjourney response structure
+                                if "aiRecord" in variation_data and 
"aiRecordDetail" in variation_data["aiRecord"]: + record_detail = variation_data["aiRecord"]["aiRecordDetail"] + if "resultObject" in record_detail: + result = record_detail["resultObject"] + if isinstance(result, list): + variation_urls = result + elif isinstance(result, str): + variation_urls = [result] + + # An alternative search path + if not variation_urls and "resultObject" in variation_data: + result = variation_data["resultObject"] + if isinstance(result, list): + variation_urls = result + elif isinstance(result, str): + variation_urls = [result] + + if variation_urls: + logger.info(f"[{request_id}] Found {len(variation_urls)} variation URLs") + + # We form full URLs for display + full_variation_urls = [] + asset_host = "https://asset.1min.ai" + + for url in variation_urls: + # Create a full URL to display + if not url.startswith("http"): + full_url = f"{asset_host}/{url}" + else: + full_url = url + + full_variation_urls.append(full_url) + + # We form a response in Markdown format + markdown_text = "" + if len(full_variation_urls) == 1: + markdown_text = f"![Variation]({full_variation_urls[0]}) `[_V1_]`" + markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** and send it (paste) in the next **prompt**" + else: + image_lines = [] + for i, url in enumerate(full_variation_urls): + image_lines.append(f"![Variation {i + 1}]({url}) `[_V{i + 1}_]`") + + markdown_text = "\n".join(image_lines) + markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** - **[_V4_]** and send it (paste) in the next **prompt**" + + # We form an answer in Openai format + openai_response = { + "id": f"chatcmpl-{uuid.uuid4()}", + "object": "chat.completion", + "created": int(time.time()), + "model": model, + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": markdown_text + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 0, + "completion_tokens": 0, + "total_tokens": 0 + } + } + + return jsonify(openai_response), 200 + else: + logger.error(f"[{request_id}] No variation URLs found in response") + else: + logger.error(f"[{request_id}] Direct variation request failed: {variation_response.status_code} - {variation_response.text}") + # When the Gateway Timeout (504) error, we return the error immediately, and do not continue to process + if variation_response.status_code == 504: + logger.error(f"[{request_id}] Midjourney API timeout (504). Returning error to client instead of fallback.") + return jsonify({ + "error": "Gateway Timeout (504) occurred while processing image variation request. Try again later." 
+ }), 504 + # With an error with the ratio of the parties (409), we also return the error + elif variation_response.status_code == 409: + error_message = "Error creating image variation" + # Trying to extract an error from an answer + try: + error_json = variation_response.json() + if "message" in error_json: + error_message = error_json["message"] + except: + pass + logger.error(f"[{request_id}] Midjourney API error (409): {error_message}") + return jsonify({ + "error": f"Failed to create image variation: {error_message}" + }), 409 + except Exception as e: + logger.error(f"[{request_id}] Exception during direct variation request: {str(e)}") + # We return the error directly to the client instead of the transition to the backup path + return jsonify({ + "error": f"Error processing direct variation request: {str(e)}" + }), 500 + + # We convert the full URL to a relative path if it corresponds to the Asset.1Min.Ai format + image_path = None + if "asset.1min.ai" in image_url: + # We extract part of the path /images /... + path_match = re.search(r'(?:asset\.1min\.ai)(/images/[^?#]+)', image_url) + if path_match: + image_path = path_match.group(1) + # We remove the initial slash if it is + if image_path.startswith('/'): + image_path = image_path[1:] + else: + # We try to extract the path from the URL in general + path_match = re.search(r'/images/[^?#]+', image_url) + if path_match: + image_path = path_match.group(0) + # We remove the initial slash if it is + if image_path.startswith('/'): + image_path = image_path[1:] + + # If you find a relative path, we use it instead of a complete URL + download_url = image_url + if image_path: + logger.debug(f"[{request_id}] Extracted relative path from image URL: {image_path}") + # We use the full URL for loading, but we keep the relative path + + # Download the image to a temporary file and send a redirection + # On the route/v1/images/variations by analogy s/v1/images/generations + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png") + img_response = requests.get(download_url, stream=True) + + if img_response.status_code != 200: + return jsonify( + {"error": f"Failed to download image from URL. 
Status code: {img_response.status_code}"}), 400 + + with open(temp_file.name, 'wb') as f: + for chunk in img_response.iter_content(chunk_size=8192): + f.write(chunk) + + # We save the path to the temporary file in memory for use in the route/v1/images/variations + variation_key = f"variation:{request_id}" + variation_data = { + "temp_file": temp_file.name, + "model": model, + "n": request_data.get("n", 1), + "image_path": image_path # We keep the relative path if it is + } + # We use Safe_MemCeched_Operation, which now supports Memory_storage + safe_memcached_operation('set', variation_key, variation_data, expiry=300) # Store 5 minutes + logger.debug(f"[{request_id}] Saved variation data with key: {variation_key}") + + # We redirect the route/v1/images/variations + logger.info(f"[{request_id}] Redirecting to /v1/images/variations with model {model}") + + # Add detailed logistics for diagnosis + logger.info(f"[{request_id}] Temp file path: {temp_file.name}, exists: {os.path.exists(temp_file.name)}") + logger.info(f"[{request_id}] Image path: {image_path}") + logger.info(f"[{request_id}] Variation data prepared with temp file and image path") + + return redirect(url_for('image_variations', request_id=request_id), code=307) + + except Exception as e: + logger.error(f"[{request_id}] Error processing variation command: {str(e)}") + return jsonify({"error": f"Failed to process variation command: {str(e)}"}), 500 + + + # We log in the extracted Prompt for debugging + logger.debug(f"[{request_id}] Extracted prompt text: {prompt_text[:100]}..." if len( + prompt_text) > 100 else f"[{request_id}] Extracted prompt text: {prompt_text}") + + # We check whether the model belongs to one of the special types + # For images generation models + if model in IMAGE_GENERATION_MODELS: + logger.info(f"[{request_id}] Redirecting image generation model to /v1/images/generations") + + # We create a new request only with the necessary fields to generate image + # We take only the current user's current production without combining with history + image_request = { + "model": model, + "prompt": prompt_text, # Only the current request + "n": request_data.get("n", 1), + "size": request_data.get("size", "1024x1024") + } + + # Add additional parameters for certain models + if model == "dall-e-3": + image_request["quality"] = request_data.get("quality", "standard") + image_request["style"] = request_data.get("style", "vivid") + + # We check the availability of special parameters in Prompt for models type Midjourney + if model.startswith("midjourney"): + # Add inspections and parameters for midjourney models + if "--ar" in prompt_text or "\u2014ar" in prompt_text: + logger.debug(f"[{request_id}] Found aspect ratio parameter in prompt") + elif request_data.get("aspect_ratio"): + image_request["aspect_ratio"] = request_data.get("aspect_ratio") + + if "--no" in prompt_text or "\u2014no" in prompt_text: + logger.debug(f"[{request_id}] Found negative prompt parameter in prompt") + elif request_data.get("negative_prompt"): + # Add negative prompt field as a separate parameter + image_request["negative_prompt"] = request_data.get("negative_prompt") + + # We delete messages from the request to avoid combining history + if "messages" in image_request: + del image_request["messages"] + + logger.debug(f"[{request_id}] Final image request: {json.dumps(image_request)[:200]}...") + + # We save a modified request (only the last request without history) + request.environ["body_copy"] = json.dumps(image_request) + return 
redirect(url_for('generate_image'), code=307) # 307 preserves the method and body of the request + + # For speech generation models (TTS) + if model in TEXT_TO_SPEECH_MODELS: + logger.info(f"[{request_id}] Processing text-to-speech request directly") + + if not prompt_text: + logger.error(f"[{request_id}] No input text provided for TTS") + return jsonify({"error": "No input text provided"}), 400 + + logger.debug(f"[{request_id}] TTS input text: {prompt_text[:100]}..." if len(prompt_text) > 100 else f"[{request_id}] TTS input text: {prompt_text}") + + voice = request_data.get("voice", "alloy") + response_format = request_data.get("response_format", "mp3") + speed = request_data.get("speed", 1.0) + + # We form Payload for a request to the API 1min.ai according to the documentation + payload = { + "type": "TEXT_TO_SPEECH", + "model": model, + "promptObject": { + "text": prompt_text, + "voice": voice, + "response_format": response_format, + "speed": speed + } + } + + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + try: + # Send the request directly + logger.debug(f"[{request_id}] Sending direct TTS request to {ONE_MIN_API_URL}") + response = api_request("POST", ONE_MIN_API_URL, json=payload, headers=headers) + logger.debug(f"[{request_id}] TTS response status code: {response.status_code}") + + if response.status_code != 200: + if response.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + logger.error(f"[{request_id}] Error in TTS response: {response.text[:200]}") + return ( + jsonify({"error": response.json().get("error", "Unknown error")}), + response.status_code, + ) + + # We get a URL audio from the answer + one_min_response = response.json() + audio_url = "" + + if "aiRecord" in one_min_response and "aiRecordDetail" in one_min_response["aiRecord"]: + result_object = one_min_response["aiRecord"]["aiRecordDetail"].get("resultObject", "") + if isinstance(result_object, list) and result_object: + audio_url = result_object[0] + else: + audio_url = result_object + elif "resultObject" in one_min_response: + result_object = one_min_response["resultObject"] + if isinstance(result_object, list) and result_object: + audio_url = result_object[0] + else: + audio_url = result_object + + if not audio_url: + logger.error(f"[{request_id}] Could not extract audio URL from API response") + return jsonify({"error": "Could not extract audio URL"}), 500 + + # Instead of downloading audio, we form a response with Markdown + logger.info(f"[{request_id}] Successfully generated speech audio URL: {audio_url}") + + # We get a full URL for the audio file + try: + # We check for the presence of a complete signed link in the response of the API + signed_url = None + + # Check the availability of the Temporaryurl field in the answer (according to the API response format) + if "temporaryUrl" in one_min_response: + signed_url = one_min_response["temporaryUrl"] + logger.debug(f"[{request_id}] Found temporaryUrl in API response root") + elif "result" in one_min_response and "resultList" in one_min_response["result"]: + # Check in the list of results + for item in one_min_response["result"]["resultList"]: + if item.get("type") == "TEXT_TO_SPEECH" and "temporaryUrl" in item: + signed_url = item["temporaryUrl"] + logger.debug(f"[{request_id}] Found temporaryUrl in resultList") + break + + # Checking in Airecord, if there are no links in the main places + if not signed_url and "aiRecord" in one_min_response: + if "temporaryUrl" in one_min_response["aiRecord"]: + signed_url = 
one_min_response["aiRecord"]["temporaryUrl"] + logger.debug(f"[{request_id}] Found temporaryUrl in aiRecord") + + # We check other possible fields for reverse compatibility + if not signed_url: + # We are looking for in various places in the API response format + if "aiRecord" in one_min_response and "aiRecordDetail" in one_min_response["aiRecord"]: + if "signedUrls" in one_min_response["aiRecord"]["aiRecordDetail"]: + signed_urls = one_min_response["aiRecord"]["aiRecordDetail"]["signedUrls"] + if isinstance(signed_urls, list) and signed_urls: + signed_url = signed_urls[0] + elif isinstance(signed_urls, str): + signed_url = signed_urls + elif "signedUrl" in one_min_response["aiRecord"]["aiRecordDetail"]: + signed_url = one_min_response["aiRecord"]["aiRecordDetail"]["signedUrl"] + elif "signedUrls" in one_min_response: + signed_urls = one_min_response["signedUrls"] + if isinstance(signed_urls, list) and signed_urls: + signed_url = signed_urls[0] + elif isinstance(signed_urls, str): + signed_url = signed_urls + elif "signedUrl" in one_min_response: + signed_url = one_min_response["signedUrl"] + + # We use the received signed link or basic URL + if signed_url: + full_audio_url = signed_url + logger.debug(f"[{request_id}] Using signed URL from API: {signed_url[:100]}...") + else: + # If there is no signed link, we use the basic URL in S3 format + # Although without a signature, he will most likely not work + full_audio_url = f"https://s3.us-east-1.amazonaws.com/asset.1min.ai/{audio_url}" + logger.warning(f"[{request_id}] No signed URL found, using base S3 URL: {full_audio_url}") + + except Exception as e: + logger.error(f"[{request_id}] Error processing audio URL: {str(e)}") + full_audio_url = f"https://asset.1min.ai/{audio_url}" + logger.warning(f"[{request_id}] Error occurred, using fallback URL: {full_audio_url}") + + # We form a response in the format similar to Chat Complets + completion_response = { + "id": f"chatcmpl-{request_id}", + "object": "chat.completion", + "created": int(time.time()), + "model": model, + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": f"🔊 [Audio.mp3]({full_audio_url})" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": len(prompt_text.split()), + "completion_tokens": 1, + "total_tokens": len(prompt_text.split()) + 1 + } + } + + return jsonify(completion_response) + + except Exception as e: + logger.error(f"[{request_id}] Exception during TTS request: {str(e)}") + return jsonify({"error": str(e)}), 500 + + # For models of audio transcription (STT) + if model in SPEECH_TO_TEXT_MODELS: + logger.info(f"[{request_id}] Redirecting speech-to-text model to /v1/audio/transcriptions") + return redirect(url_for('audio_transcriptions'), code=307) + + # Let's journal the beginning of the request + logger.debug(f"[{request_id}] Processing chat completion request") + + # Check whether the image of the image contains + image = False + image_paths = [] + + # Check the availability of user files for working with PDF + user_file_ids = [] + if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None: + try: + user_key = f"user:{api_key}" + user_files_json = safe_memcached_operation('get', user_key) + if user_files_json: + try: + if isinstance(user_files_json, str): + user_files = json.loads(user_files_json) + elif isinstance(user_files_json, bytes): + user_files = json.loads(user_files_json.decode('utf-8')) + else: + user_files = user_files_json + + if user_files and isinstance(user_files, list): + # We extract the ID 
files + user_file_ids = [file_info.get("id") for file_info in user_files if file_info.get("id")] + logger.debug(f"[{request_id}] Found user files: {user_file_ids}") + except Exception as e: + logger.error(f"[{request_id}] Error parsing user files from memcached: {str(e)}") + except Exception as e: + logger.error(f"[{request_id}] Error retrieving user files from memcached: {str(e)}") + else: + logger.debug(f"[{request_id}] Memcached not available, no user files loaded") + + # We check the availability of messages before the start of processing + if not messages: + logger.error(f"[{request_id}] No messages provided in request") + return ERROR_HANDLER(1412) + + # We extract the text of the request for analysis + extracted_prompt = messages[-1].get("content", "") + if isinstance(extracted_prompt, list): + extracted_prompt = " ".join([item.get("text", "") for item in extracted_prompt if "text" in item]) + extracted_prompt_lower = extracted_prompt.lower() if extracted_prompt else "" + + # If the request does not indicate File_ids, but the user has uploaded files, + # Add them to the request only if the message mentions something about files or documents + file_keywords = ["файл", "файлы", "file", "files", "документ", "документы", "document", "documents"] + prompt_has_file_keywords = False + + # Check the availability of keywords about files in the request + if extracted_prompt_lower: + prompt_has_file_keywords = any(keyword in extracted_prompt_lower for keyword in file_keywords) + + # Add files only if the user requested work with files or clearly indicated File_ids + if (not request_data.get("file_ids") and user_file_ids and prompt_has_file_keywords): + logger.info(f"[{request_id}] Adding user files to request: {user_file_ids}") + request_data["file_ids"] = user_file_ids + elif not request_data.get("file_ids") and user_file_ids: + logger.debug(f"[{request_id}] User has files but didn't request to use them in this message") + + # We get the contents of the last message for further processing + user_input = messages[-1].get("content") + if not user_input: + logger.error(f"[{request_id}] No content in last message") + return ERROR_HANDLER(1423) + + # We form the history of dialogue + all_messages = format_conversation_history( + request_data.get("messages", []), request_data.get("new_input", "") + ) + + # Checking for the presence of images in the last message + if isinstance(user_input, list): + logger.debug( + f"[{request_id}] Processing message with multiple content items (text/images)" + ) + combined_text = "" + for i, item in enumerate(user_input): + if "text" in item: + combined_text += item["text"] + "\n" + logger.debug(f"[{request_id}] Added text content from item {i + 1}") + + if "image_url" in item: + if model not in vision_supported_models: + logger.error( + f"[{request_id}] Model {model} does not support images" + ) + return ERROR_HANDLER(1044, model) + + # Create a hash url image for caching + image_key = None + image_url = None + + # We extract the URL images + if ( + isinstance(item["image_url"], dict) + and "url" in item["image_url"] + ): + image_url = item["image_url"]["url"] + else: + image_url = item["image_url"] + + # Heshchit url for the cache + if image_url: + image_key = hashlib.md5(image_url.encode("utf-8")).hexdigest() + + # Check the cache + if image_key and image_key in IMAGE_CACHE: + cached_path = IMAGE_CACHE[image_key] + logger.debug( + f"[{request_id}] Using cached image path for item {i + 1}: {cached_path}" + ) + image_paths.append(cached_path) + image = True + 
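# Cache hit: this image was already uploaded, so skip the upload path below
+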
continue + + # We load the image if it is not in the cache + logger.debug( + f"[{request_id}] Processing image URL in item {i + 1}: {image_url[:30]}..." + ) + + # We load the image + image_path = retry_image_upload( + image_url, api_key, request_id=request_id + ) + + if image_path: + # We save in the cache + if image_key: + IMAGE_CACHE[image_key] = image_path + # Clean the old notes if necessary + if len(IMAGE_CACHE) > MAX_CACHE_SIZE: + old_key = next(iter(IMAGE_CACHE)) + del IMAGE_CACHE[old_key] + + image_paths.append(image_path) + image = True + logger.debug( + f"[{request_id}] Image {i + 1} successfully processed: {image_path}" + ) + else: + logger.error(f"[{request_id}] Failed to upload image {i + 1}") + + # We replace user_input with the textual part only if it is not empty + if combined_text: + user_input = combined_text + + # We check if there is File_ids for a chat with documents + file_ids = request_data.get("file_ids", []) + conversation_id = request_data.get("conversation_id", None) + + # We extract the text of the request for the analysis of keywords + prompt_text = all_messages.lower() + extracted_prompt = messages[-1].get("content", "") + if isinstance(extracted_prompt, list): + extracted_prompt = " ".join([item.get("text", "") for item in extracted_prompt if "text" in item]) + extracted_prompt = extracted_prompt.lower() + + logger.debug(f"[{request_id}] Extracted prompt text: {extracted_prompt}") + + # We check the file deletion request + delete_keywords = ["удалить", "удали", "удаление", "очисти", "очистка", "delete", "remove", "clean"] + file_keywords = ["файл", "файлы", "file", "files", "документ", "документы", "document", "documents"] + mime_type_keywords = ["pdf", "txt", "doc", "docx", "csv", "xls", "xlsx", "json", "md", "html", "htm", "xml", + "pptx", "ppt", "rtf"] + + # Combine all keywords for files + all_file_keywords = file_keywords + mime_type_keywords + + # We check the request for file deletion (there must be keywords of deletion and file keywords) + has_delete_keywords = any(keyword in extracted_prompt for keyword in delete_keywords) + has_file_keywords = any(keyword in extracted_prompt for keyword in all_file_keywords) + + if has_delete_keywords and has_file_keywords and user_file_ids: + logger.info(f"[{request_id}] Deletion request detected, removing all user files") + + # Trying to get ID teams + team_id = None + try: + # Trying to get ID commands through API + teams_url = f"{ONE_MIN_API_URL}/teams" + teams_headers = {"API-KEY": api_key} + teams_response = api_request("GET", teams_url, headers=teams_headers) + if teams_response.status_code == 200: + teams_data = teams_response.json() + if "data" in teams_data and teams_data["data"]: + team_id = teams_data["data"][0].get("id") + logger.debug(f"[{request_id}] Found team ID for deletion: {team_id}") + except Exception as e: + logger.error(f"[{request_id}] Error getting team ID for deletion: {str(e)}") + + deleted_files = [] + for file_id in user_file_ids: + try: + # We form a URL to delete the file depending on the availability of Team_id + if team_id: + delete_url = f"{ONE_MIN_API_URL}/teams/{team_id}/assets/{file_id}" + else: + delete_url = f"{ONE_MIN_ASSET_URL}/{file_id}" + + logger.debug(f"[{request_id}] Using URL for deletion: {delete_url}") + headers = {"API-KEY": api_key} + + delete_response = api_request("DELETE", delete_url, headers=headers) + + if delete_response.status_code == 200: + logger.info(f"[{request_id}] Successfully deleted file: {file_id}") + deleted_files.append(file_id) + else: + 
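# Deletion failed; log it and keep processing the remaining files
+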
logger.error(f"[{request_id}] Failed to delete file {file_id}: {delete_response.status_code}") + except Exception as e: + logger.error(f"[{request_id}] Error deleting file {file_id}: {str(e)}") + + # Clean the user's list of user files in Memcache + if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None and deleted_files: + try: + user_key = f"user:{api_key}" + safe_memcached_operation('set', user_key, json.dumps([])) + logger.info(f"[{request_id}] Cleared user files list in memcached") + except Exception as e: + logger.error(f"[{request_id}] Error clearing user files in memcached: {str(e)}") + + # Send a response to file deletion + return jsonify({ + "id": str(uuid.uuid4()), + "object": "chat.completion", + "created": int(time.time()), + "model": model, + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": f"Удалено файлов: {len(deleted_files)}. Список файлов очищен." + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": calculate_token(prompt_text), + "completion_tokens": 20, + "total_tokens": calculate_token(prompt_text) + 20 + } + }), 200 + + # We check the request for keywords for file processing + has_file_reference = any(keyword in extracted_prompt for keyword in all_file_keywords) + + # If there is File_ids and the request contains keywords about files or there are ID conversations, we use Chat_with_PDF + if file_ids and len(file_ids) > 0: + logger.debug( + f"[{request_id}] Creating CHAT_WITH_PDF request with {len(file_ids)} files" + ) + + # Add instructions for working with documents to Prompt + enhanced_prompt = all_messages + if not enhanced_prompt.strip().startswith(DOCUMENT_ANALYSIS_INSTRUCTION): + enhanced_prompt = f"{DOCUMENT_ANALYSIS_INSTRUCTION}\n\n{all_messages}" + + # We get the user Team_id + team_id = None + try: + teams_url = "https://api.1min.ai/api/teams" # Correct URL C /API / + teams_headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + logger.debug(f"[{request_id}] Fetching team ID from: {teams_url}") + teams_response = requests.get(teams_url, headers=teams_headers) + + if teams_response.status_code == 200: + teams_data = teams_response.json() + if "data" in teams_data and teams_data["data"]: + team_id = teams_data["data"][0].get("id") + logger.debug(f"[{request_id}] Got team ID: {team_id}") + else: + logger.warning( + f"[{request_id}] Failed to get team ID: {teams_response.status_code} - {teams_response.text}") + except Exception as e: + logger.error(f"[{request_id}] Error getting team ID: {str(e)}") + + # If there is no Conversation_id, we create a new conversation + if not conversation_id: + conversation_id = create_conversation_with_files( + file_ids, "Chat with documents", model, api_key, request_id + ) + if not conversation_id: + return ( + jsonify({"error": "Failed to create conversation with files"}), + 500, + ) + + # We form Payload to request files + payload = {"message": enhanced_prompt} + if conversation_id: + payload["conversationId"] = conversation_id + + # We use the correct URL API C /API / + api_url = "https://api.1min.ai/api/features/conversations/messages" + # Add Conversationid as a request parameter + api_params = {"conversationId": conversation_id} + + logger.debug( + f"[{request_id}] Sending message to conversation using URL: {api_url} with params: {api_params}") + + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + # Depending on the Stream parameter, select the request method + if stream: + # Streaming request + return streaming_request( + 
api_url, payload, headers, request_id, model, model_settings, api_params=api_params + ) + else: + # The usual request + try: + response = requests.post(api_url, json=payload, headers=headers, params=api_params) + + logger.debug(f"[{request_id}] API response status code: {response.status_code}") + if response.status_code != 200: + logger.error( + f"[{request_id}] API error: {response.status_code} - {response.text}" + ) + return ( + jsonify({"error": "API request failed", "details": response.text}), + response.status_code, + ) + + # We convert the answer to the Openai format + response_data = response.json() + logger.debug(f"[{request_id}] Raw API response: {json.dumps(response_data)[:500]}...") + + # We extract a response from different places of data structure + ai_response = None + if "answer" in response_data: + ai_response = response_data["answer"] + elif "message" in response_data: + ai_response = response_data["message"] + elif "result" in response_data: + ai_response = response_data["result"] + elif "aiRecord" in response_data and "aiRecordDetail" in response_data["aiRecord"]: + ai_response = response_data["aiRecord"]["aiRecordDetail"].get("answer", "") + + if not ai_response: + # Recursively looking for a response on Keys Asswer, Message, Result + def find_response(obj, path=""): + if isinstance(obj, dict): + for key in ["answer", "message", "result"]: + if key in obj: + logger.debug(f"[{request_id}] Found response at path '{path}.{key}'") + return obj[key] + + for key, value in obj.items(): + result = find_response(value, f"{path}.{key}") + if result: + return result + elif isinstance(obj, list): + for i, item in enumerate(obj): + result = find_response(item, f"{path}[{i}]") + if result: + return result + return None + + ai_response = find_response(response_data) + + if not ai_response: + logger.error(f"[{request_id}] Could not extract AI response from API response") + return jsonify({"error": "Could not extract AI response"}), 500 + + openai_response = format_openai_response( + ai_response, model, request_id + ) + return jsonify(openai_response) + except Exception as e: + logger.error( + f"[{request_id}] Exception while processing API response: {str(e)}" + ) + traceback.print_exc() + return jsonify({"error": str(e)}), 500 + + # Counting tokens + prompt_token = calculate_token(str(all_messages)) + + # Checking the model + if PERMIT_MODELS_FROM_SUBSET_ONLY and model not in AVAILABLE_MODELS: + return ERROR_HANDLER(1002, model) + + logger.debug( + f"[{request_id}] Processing {prompt_token} prompt tokens with model {model}" + ) + + # Prepare Payload, taking into account the capabilities of the model + payload = prepare_payload( + request_data, model, all_messages, image_paths, request_id + ) + + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + # Request depending on Stream + if not request_data.get("stream", False): + # The usual request + logger.debug( + f"[{request_id}] Sending non-streaming request to {ONE_MIN_API_URL}" + ) + + try: + response = api_request( + "POST", ONE_MIN_API_URL, json=payload, headers=headers + ) + logger.debug( + f"[{request_id}] Response status code: {response.status_code}" + ) + + if response.status_code != 200: + if response.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + try: + error_content = response.json() + logger.error(f"[{request_id}] Error response: {error_content}") + except: + logger.error( + f"[{request_id}] Could not parse error response as JSON" + ) + return ERROR_HANDLER(response.status_code) + + 
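# Map the raw 1min.ai response onto the OpenAI chat-completion shape below
+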
one_min_response = response.json()
+            transformed_response = transform_response(
+                one_min_response, request_data, prompt_token
+            )
+
+            response = make_response(jsonify(transformed_response))
+            set_response_headers(response)
+            return response, 200
+        except Exception as e:
+            logger.error(f"[{request_id}] Exception during request: {str(e)}")
+            return jsonify({"error": str(e)}), 500
+    else:
+        # Streaming request
+        logger.debug(f"[{request_id}] Sending streaming request")
+
+        # URL for streaming mode
+        streaming_url = f"{ONE_MIN_API_URL}?isStreaming=true"
+
+        logger.debug(f"[{request_id}] Streaming URL: {streaming_url}")
+        logger.debug(f"[{request_id}] Payload: {json.dumps(payload)[:200]}...")
+
+        # If web search is enabled, log the full web-search block for debugging
+        if "promptObject" in payload and payload["promptObject"].get("webSearch"):
+            logger.info(f"[{request_id}] Web search parameters in payload: " +
+                        f"webSearch={payload['promptObject'].get('webSearch')}, " +
+                        f"numOfSite={payload['promptObject'].get('numOfSite')}, " +
+                        f"maxWord={payload['promptObject'].get('maxWord')}")
+
+        try:
+            # Use a session to control the connection
+            session = create_session()
+            response_stream = session.post(
+                streaming_url, json=payload, headers=headers, stream=True
+            )
+
+            logger.debug(
+                f"[{request_id}] Streaming response status code: {response_stream.status_code}"
+            )
+
+            if response_stream.status_code != 200:
+                if response_stream.status_code == 401:
+                    session.close()
+                    return ERROR_HANDLER(1020, key=api_key)
+
+                logger.error(
+                    f"[{request_id}] Error status code: {response_stream.status_code}"
+                )
+                try:
+                    error_content = response_stream.json()
+                    logger.error(f"[{request_id}] Error response: {error_content}")
+                except:
+                    logger.error(
+                        f"[{request_id}] Could not parse error response as JSON"
+                    )
+
+                session.close()
+                return ERROR_HANDLER(response_stream.status_code)
+
+            # Hand the session off to the generator
+            return Response(
+                stream_response(
+                    response_stream, request_data, model, prompt_token, session
+                ),
+                content_type="text/event-stream",
+            )
+        except Exception as e:
+            logger.error(
+                f"[{request_id}] Exception during streaming request: {str(e)}"
+            )
+            return jsonify({"error": str(e)}), 500
+    except Exception as e:
+        logger.error(
+            f"[{request_id}] Exception during conversation processing: {str(e)}"
+        )
+        traceback.print_exc()
+        return (
+            jsonify({"error": f"Error during conversation processing: {str(e)}"}),
+            500,
+        )
+
+
+def parse_aspect_ratio(prompt, model, request_data, request_id=None):
+    """
+    Extracts the aspect ratio from the request or the prompt and validates it
+
+    Args:
+        prompt (str): Request text
+        model (str): Name of the image-generation model
+        request_data (dict): Request data
+        request_id (str, optional): Request ID for logging
+
+    Returns:
+        tuple: (modified prompt, aspect ratio, image size, error message, mode)
+    """
+    # Default values
+    aspect_ratio = None
+    size = request_data.get("size", "1024x1024")
+    ar_error = None
+    mode = None
+
+    # Look for a mode parameter in the text
+    mode_match = re.search(r'(--|\u2014)(fast|relax)\s*', prompt)
+    if mode_match:
+        mode = mode_match.group(2)
+        # Remove the mode parameter from the prompt
+        prompt = re.sub(r'(--|\u2014)(fast|relax)\s*', '', prompt).strip()
+        logger.debug(f"[{request_id}] Extracted mode from prompt: {mode}")
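+
+    # Illustrative example (assumed input): for
+    #   prompt = "a red fox --ar 16:9"
+    # the block below strips "--ar 16:9" from the prompt and returns
+    # aspect_ratio="16:9"; the size is then adjusted per model further down.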
+
+    # Try to extract the aspect ratio from the prompt
+    ar_match = re.search(r'(--|\u2014)ar\s+(\d+):(\d+)', prompt)
+    if ar_match:
+        width = int(ar_match.group(2))
+        height = int(ar_match.group(3))
+
+        # Check that the ratio does not exceed 2:1 or 1:2
+        if max(width, height) / min(width, height) > 2:
+            ar_error = "Aspect ratio cannot exceed 2:1 or 1:2"
+            logger.error(f"[{request_id}] Invalid aspect ratio: {width}:{height} - {ar_error}")
+            return prompt, None, size, ar_error, mode
+
+        # Check that the values are within the permissible range
+        if width < 1 or width > 10000 or height < 1 or height > 10000:
+            ar_error = "Aspect ratio values must be between 1 and 10000"
+            logger.error(f"[{request_id}] Invalid aspect ratio values: {width}:{height} - {ar_error}")
+            return prompt, None, size, ar_error, mode
+
+        # Set the aspect ratio
+        aspect_ratio = f"{width}:{height}"
+
+        # Remove the parameter from the prompt
+        prompt = re.sub(r'(--|\u2014)ar\s+\d+:\d+\s*', '', prompt).strip()
+
+        logger.debug(f"[{request_id}] Extracted aspect ratio: {aspect_ratio}")
+
+    # If there is no ratio in the prompt, check the request body
+    elif "aspect_ratio" in request_data:
+        aspect_ratio = request_data.get("aspect_ratio")
+
+        # Check that the ratio is in the correct format
+        if not re.match(r'^\d+:\d+$', aspect_ratio):
+            ar_error = "Aspect ratio must be in format width:height"
+            logger.error(f"[{request_id}] Invalid aspect ratio format: {aspect_ratio} - {ar_error}")
+            return prompt, None, size, ar_error, mode
+
+        width, height = map(int, aspect_ratio.split(':'))
+
+        # Check that the ratio does not exceed 2:1 or 1:2
+        if max(width, height) / min(width, height) > 2:
+            ar_error = "Aspect ratio cannot exceed 2:1 or 1:2"
+            logger.error(f"[{request_id}] Invalid aspect ratio: {width}:{height} - {ar_error}")
+            return prompt, None, size, ar_error, mode
+
+        # Check that the values are within the permissible range
+        if width < 1 or width > 10000 or height < 1 or height > 10000:
+            ar_error = "Aspect ratio values must be between 1 and 10000"
+            logger.error(f"[{request_id}] Invalid aspect ratio values: {width}:{height} - {ar_error}")
+            return prompt, None, size, ar_error, mode
+
+        logger.debug(f"[{request_id}] Using aspect ratio from request: {aspect_ratio}")
+
+    # Strip all other possible parameter modifiers
+    # Remove negative prompts (--no or \u2014no)
+    prompt = re.sub(r'(--|\u2014)no\s+.*?(?=(--|\u2014)|$)', '', prompt).strip()
+
+    # For DALL-E 3, set the corresponding dimensions
+    if model == "dall-e-3" and aspect_ratio:
+        width, height = map(int, aspect_ratio.split(':'))
+
+        # Round to the nearest ratio supported by DALL-E 3
+        if abs(width / height - 1) < 0.1:  # Square
+            size = "1024x1024"
+            aspect_ratio = "square"
+        elif width > height:  # Landscape orientation
+            size = "1792x1024"
+            aspect_ratio = "landscape"
+        else:  # Portrait orientation
+            size = "1024x1792"
+            aspect_ratio = "portrait"
+
+        logger.debug(f"[{request_id}] Adjusted size for DALL-E 3: {size}, aspect_ratio: {aspect_ratio}")
+
+    # For Leonardo models, set the dimensions based on the aspect ratio
+    elif (model in [
+        "6b645e3a-d64f-4341-a6d8-7a3690fbf042", "phoenix",  # Leonardo.ai - Phoenix
+        "b24e16ff-06e3-43eb-8d33-4416c2d75876", "lightning-xl",  # Leonardo.ai - Lightning XL
+        "5c232a9e-9061-4777-980a-ddc8e65647c6", "vision-xl",  # Leonardo.ai - Vision XL
+        "e71a1c2f-4f80-4800-934f-2c68979d8cc8", "anime-xl",  # Leonardo.ai - Anime XL
+        "1e60896f-3c26-4296-8ecc-53e2afecc132", "diffusion-xl",  # Leonardo.ai - Diffusion XL
+        "aa77f04e-3eec-4034-9c07-d0f619684628", "kino-xl",  # Leonardo.ai - Kino XL
"2067ae52-33fd-4a82-bb92-c2c55e7d2786", "albedo-base-xl" # Leonardo.ai - Albedo Base XL + ]) and aspect_ratio: + # Determine the size based on the ratio of the parties + if aspect_ratio == "1:1": + size = LEONARDO_SIZES["1:1"] # "1024x1024" + elif aspect_ratio == "4:3": + size = LEONARDO_SIZES["4:3"] # "1024x768" + elif aspect_ratio == "3:4": + size = LEONARDO_SIZES["3:4"] # "768x1024" + # For other ratios, we round to the nearest supported + else: + width, height = map(int, aspect_ratio.split(':')) + ratio = width / height + + if abs(ratio - 1) < 0.1: # Close to 1: 1 + size = LEONARDO_SIZES["1:1"] # "1024x1024" + aspect_ratio = "1:1" + elif ratio > 1: # The width is greater than the height (album orientation) + size = LEONARDO_SIZES["4:3"] # "1024x768" + aspect_ratio = "4:3" + else: # The height is greater than the width (portrait orientation) + size = LEONARDO_SIZES["3:4"] # "768x1024" + aspect_ratio = "3:4" + + logger.debug(f"[{request_id}] Adjusted size for Leonardo model: {size}, aspect_ratio: {aspect_ratio}") + + return prompt, aspect_ratio, size, ar_error, mode + + +@app.route("/v1/images/generations", methods=["POST", "OPTIONS"]) +@limiter.limit("60 per minute") +def generate_image(): + """ + Route for generating images + """ + if request.method == "OPTIONS": + return handle_options_request() + + # Create a unique ID for request + request_id = str(uuid.uuid4()) + logger.info(f"[{request_id}] Received request: /v1/images/generations") + + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + logger.error(f"[{request_id}] Invalid Authentication") + return ERROR_HANDLER(1021) + + api_key = auth_header.split(" ")[1] + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + # Verification that the data is transmitted in the correct format + if request.is_json: + request_data = request.get_json() + else: + logger.error(f"[{request_id}] Request content-type is not application/json") + return jsonify({"error": "Content-type must be application/json"}), 400 + + # We get the necessary parameters from the request + model = request_data.get("model", "dall-e-3").strip() + prompt = request_data.get("prompt", "").strip() + + # If the request was redirected from the Conversation function, + # We must take only the last request of the user without history + if request.environ.get("HTTP_REFERER") and "chat/completions" in request.environ.get("HTTP_REFERER"): + logger.debug(f"[{request_id}] Request came from chat completions, isolating the prompt") + # We do not combine prompt depths, but we take only the last user request + + # Determine the presence of negative prompts (if any) + negative_prompt = None + no_match = re.search(r'(--|\u2014)no\s+(.*?)(?=(--|\u2014)|$)', prompt) + if no_match: + negative_prompt = no_match.group(2).strip() + # We delete negative prompt plate from the main text + prompt = re.sub(r'(--|\u2014)no\s+.*?(?=(--|\u2014)|$)', '', prompt).strip() + + # We process the ratio of the parties and the size + prompt, aspect_ratio, size, ar_error, mode = parse_aspect_ratio(prompt, model, request_data, request_id) + + # If there was an error in processing the ratio of the parties, we return it to the user + if ar_error: + return jsonify({"error": ar_error}), 400 + + # Checking the availability of promptpus + if not prompt: + # We check if there is a prompt in messages + messages = request_data.get("messages", []) + if messages and len(messages) > 0: + # We take only the last user message + last_message = 
messages[-1] + if last_message.get("role") == "user": + content = last_message.get("content", "") + if isinstance(content, str): + prompt = content + elif isinstance(content, list): + # Collect all the text parts of the contents + text_parts = [] + for item in content: + if isinstance(item, dict) and "text" in item: + text_parts.append(item["text"]) + prompt = " ".join(text_parts) + + # We process the parameters in Prompt from the message + negative_prompt = None + no_match = re.search(r'(--|\u2014)no\s+(.*?)(?=(--|\u2014)|$)', prompt) + if no_match: + negative_prompt = no_match.group(2).strip() + + # We re -process the prompt plate to delete modifiers + prompt, aspect_ratio, size, ar_error, mode = parse_aspect_ratio(prompt, model, request_data, request_id) + + if ar_error: + return jsonify({"error": ar_error}), 400 + + if prompt: + logger.debug(f"[{request_id}] Found prompt in messages: {prompt[:100]}..." if len( + prompt) > 100 else f"[{request_id}] Found prompt in messages: {prompt}") + else: + logger.error(f"[{request_id}] No prompt provided") + return jsonify({"error": "A prompt is required to generate an image"}), 400 + + logger.info(f"[{request_id}] Using model: {model}, prompt: '{prompt}'") + + try: + # Determine the URL for different models + api_url = f"{ONE_MIN_API_URL}" + + # Tysout 15 minutes for all images generation models + timeout = MIDJOURNEY_TIMEOUT + + # We form Payload for request depending on the model + if model == "dall-e-3": + payload = { + "type": "IMAGE_GENERATOR", + "model": "dall-e-3", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 1), + "size": size or request_data.get("size", "1024x1024"), + "quality": request_data.get("quality", "standard"), + "style": request_data.get("style", "vivid"), + }, + } + elif model == "dall-e-2": + payload = { + "type": "IMAGE_GENERATOR", + "model": "dall-e-2", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 1), + "size": size or request_data.get("size", "1024x1024"), + }, + } + elif model == "stable-diffusion-xl-1024-v1-0": + payload = { + "type": "IMAGE_GENERATOR", + "model": "stable-diffusion-xl-1024-v1-0", + "promptObject": { + "prompt": prompt, + "samples": request_data.get("n", 1), + "size": size or request_data.get("size", "1024x1024"), + "cfg_scale": request_data.get("cfg_scale", 7), + "clip_guidance_preset": request_data.get( + "clip_guidance_preset", "NONE" + ), + "seed": request_data.get("seed", 0), + "steps": request_data.get("steps", 30), + }, + } + elif model == "stable-diffusion-v1-6": + payload = { + "type": "IMAGE_GENERATOR", + "model": "stable-diffusion-v1-6", + "promptObject": { + "prompt": prompt, + "samples": request_data.get("n", 1), + "cfg_scale": request_data.get("cfg_scale", 7), + "clip_guidance_preset": request_data.get( + "clip_guidance_preset", "NONE" + ), + "height": request_data.get("height", 512), + "width": request_data.get("width", 512), + "seed": request_data.get("seed", 0), + "steps": request_data.get("steps", 30), + }, + } + elif model in ["midjourney", "midjourney_6_1"]: + # Permissible parties for the Midjourney + + # Default values + aspect_width = 1 + aspect_height = 1 + no_param = "" + + # If the ratio of the parties is indicated + if aspect_ratio: + # We break the parties to the width and height ratio + ar_parts = aspect_ratio.split(":") + aspect_width = int(ar_parts[0]) + aspect_height = int(ar_parts[1]) + + model_name = "midjourney" if model == "midjourney" else "midjourney_6_1" + + # Add logistics for the mode + logger.info(f"[{request_id}] 
Midjourney generation payload:") + logger.info(f"[{request_id}] Using mode from prompt: {mode}") + + payload = { + "type": "IMAGE_GENERATOR", + "model": model_name, + "promptObject": { + "prompt": prompt, + "mode": mode or request_data.get("mode", "fast"), + # We use the mode of prompt plate or from REQUEST_DATA + "n": 4, # Midjourney always generates 4 images + "aspect_width": aspect_width, + "aspect_height": aspect_height, + "isNiji6": request_data.get("isNiji6", False), + "maintainModeration": request_data.get("maintainModeration", True), + "image_weight": request_data.get("image_weight", 1), + "weird": request_data.get("weird", 0), + }, + } + + # Add NegativePrompt and No only if they are not empty + if negative_prompt or request_data.get("negativePrompt"): + payload["promptObject"]["negativePrompt"] = negative_prompt or request_data.get("negativePrompt", "") + + no_param = request_data.get("no", "") + if no_param: + payload["promptObject"]["no"] = no_param + + # Detailed logging for Midjourney - only once! + logger.info(f"[{request_id}] Midjourney promptObject: {json.dumps(payload['promptObject'], indent=2)}") + elif model in ["black-forest-labs/flux-schnell", "flux-schnell"]: + payload = { + "type": "IMAGE_GENERATOR", + "model": "black-forest-labs/flux-schnell", + "promptObject": { + "prompt": prompt, + "num_outputs": request_data.get("n", 1), + "aspect_ratio": aspect_ratio or request_data.get("aspect_ratio", "1:1"), + "output_format": request_data.get("output_format", "webp"), + }, + } + elif model in ["black-forest-labs/flux-dev", "flux-dev"]: + payload = { + "type": "IMAGE_GENERATOR", + "model": "black-forest-labs/flux-dev", + "promptObject": { + "prompt": prompt, + "num_outputs": request_data.get("n", 1), + "aspect_ratio": aspect_ratio or request_data.get("aspect_ratio", "1:1"), + "output_format": request_data.get("output_format", "webp"), + }, + } + elif model in ["black-forest-labs/flux-pro", "flux-pro"]: + payload = { + "type": "IMAGE_GENERATOR", + "model": "black-forest-labs/flux-pro", + "promptObject": { + "prompt": prompt, + "num_outputs": request_data.get("n", 1), + "aspect_ratio": aspect_ratio or request_data.get("aspect_ratio", "1:1"), + "output_format": request_data.get("output_format", "webp"), + }, + } + elif model in ["black-forest-labs/flux-1.1-pro", "flux-1.1-pro"]: + payload = { + "type": "IMAGE_GENERATOR", + "model": "black-forest-labs/flux-1.1-pro", + "promptObject": { + "prompt": prompt, + "num_outputs": request_data.get("n", 1), + "aspect_ratio": aspect_ratio or request_data.get("aspect_ratio", "1:1"), + "output_format": request_data.get("output_format", "webp"), + }, + } + elif model in [ + "6b645e3a-d64f-4341-a6d8-7a3690fbf042", + "phoenix", + ]: # Leonardo.ai - Phoenix + payload = { + "type": "IMAGE_GENERATOR", + "model": "6b645e3a-d64f-4341-a6d8-7a3690fbf042", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 4), + "size": size, # The size is determined on the basis of aspect_ratio in Parse_aspect_ratio + "negativePrompt": negative_prompt or request_data.get("negativePrompt", ""), + }, + } + # We delete empty parameters + if not payload["promptObject"]["negativePrompt"]: + del payload["promptObject"]["negativePrompt"] + logger.debug( + f"[{request_id}] Leonardo.ai Phoenix payload with size: {size}, from aspect_ratio: {aspect_ratio}") + elif model in [ + "b24e16ff-06e3-43eb-8d33-4416c2d75876", + "lightning-xl", + ]: # Leonardo.ai - Lightning XL + payload = { + "type": "IMAGE_GENERATOR", + "model": "b24e16ff-06e3-43eb-8d33-4416c2d75876", + 
"promptObject": { + "prompt": prompt, + "n": request_data.get("n", 4), + "size": size, # The size is determined on the basis of aspect_ratio in Parse_aspect_ratio + "negativePrompt": negative_prompt or request_data.get("negativePrompt", ""), + }, + } + # We delete empty parameters + if not payload["promptObject"]["negativePrompt"]: + del payload["promptObject"]["negativePrompt"] + logger.debug( + f"[{request_id}] Leonardo.ai Lightning XL payload with size: {size}, from aspect_ratio: {aspect_ratio}") + elif model in [ + "5c232a9e-9061-4777-980a-ddc8e65647c6", + "vision-xl", + ]: # Leonardo.ai - Vision XL + payload = { + "type": "IMAGE_GENERATOR", + "model": "5c232a9e-9061-4777-980a-ddc8e65647c6", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 4), + "size": size, # The size is determined on the basis of aspect_ratio in Parse_aspect_ratio + "negativePrompt": negative_prompt or request_data.get("negativePrompt", ""), + }, + } + # We delete empty parameters + if not payload["promptObject"]["negativePrompt"]: + del payload["promptObject"]["negativePrompt"] + logger.debug( + f"[{request_id}] Leonardo.ai Vision XL payload with size: {size}, from aspect_ratio: {aspect_ratio}") + elif model in [ + "e71a1c2f-4f80-4800-934f-2c68979d8cc8", + "anime-xl", + ]: # Leonardo.ai - Anime XL + payload = { + "type": "IMAGE_GENERATOR", + "model": "e71a1c2f-4f80-4800-934f-2c68979d8cc8", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 4), + "size": size or request_data.get("size", "1024x1024"), + "negativePrompt": negative_prompt or request_data.get("negativePrompt", ""), + "aspect_ratio": aspect_ratio + }, + } + # We delete empty parameters + if not payload["promptObject"]["negativePrompt"]: + del payload["promptObject"]["negativePrompt"] + if not payload["promptObject"]["aspect_ratio"]: + del payload["promptObject"]["aspect_ratio"] + logger.debug(f"[{request_id}] Leonardo.ai Anime XL payload with size: {size}") + elif model in [ + "1e60896f-3c26-4296-8ecc-53e2afecc132", + "diffusion-xl", + ]: # Leonardo.ai - Diffusion XL + payload = { + "type": "IMAGE_GENERATOR", + "model": "1e60896f-3c26-4296-8ecc-53e2afecc132", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 4), + "size": size or request_data.get("size", "1024x1024"), + "negativePrompt": negative_prompt or request_data.get("negativePrompt", ""), + "aspect_ratio": aspect_ratio + }, + } + # We delete empty parameters + if not payload["promptObject"]["negativePrompt"]: + del payload["promptObject"]["negativePrompt"] + if not payload["promptObject"]["aspect_ratio"]: + del payload["promptObject"]["aspect_ratio"] + logger.debug(f"[{request_id}] Leonardo.ai Diffusion XL payload with size: {size}") + elif model in [ + "aa77f04e-3eec-4034-9c07-d0f619684628", + "kino-xl", + ]: # Leonardo.ai - Kino XL + payload = { + "type": "IMAGE_GENERATOR", + "model": "aa77f04e-3eec-4034-9c07-d0f619684628", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 4), + "size": size or request_data.get("size", "1024x1024"), + "negativePrompt": negative_prompt or request_data.get("negativePrompt", ""), + "aspect_ratio": aspect_ratio + }, + } + # We delete empty parameters + if not payload["promptObject"]["negativePrompt"]: + del payload["promptObject"]["negativePrompt"] + if not payload["promptObject"]["aspect_ratio"]: + del payload["promptObject"]["aspect_ratio"] + logger.debug(f"[{request_id}] Leonardo.ai Kino XL payload with size: {size}") + elif model in [ + "2067ae52-33fd-4a82-bb92-c2c55e7d2786", + 
"albedo-base-xl", + ]: # Leonardo.ai - Albedo Base XL + payload = { + "type": "IMAGE_GENERATOR", + "model": "2067ae52-33fd-4a82-bb92-c2c55e7d2786", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 4), + "size": size or request_data.get("size", "1024x1024"), + "negativePrompt": negative_prompt or request_data.get("negativePrompt", ""), + "aspect_ratio": aspect_ratio + }, + } + # We delete empty parameters + if not payload["promptObject"]["negativePrompt"]: + del payload["promptObject"]["negativePrompt"] + if not payload["promptObject"]["aspect_ratio"]: + del payload["promptObject"]["aspect_ratio"] + logger.debug(f"[{request_id}] Leonardo.ai Albedo Base XL payload with size: {size}") + else: + logger.error(f"[{request_id}] Invalid model: {model}") + return ERROR_HANDLER(1002, model) + + logger.debug(f"[{request_id}] Sending request to 1min.ai API: {api_url}") + logger.debug(f"[{request_id}] Payload: {json.dumps(payload)[:500]}") + + # We set parameters for repeated attempts + max_retries = 1 # Only one attempt for all models + retry_count = 0 + start_time = time.time() # We remember the start time to track the total waiting time + + try: + # We send a request with a timeout + response = api_request( + "POST", + api_url, + headers=headers, + json=payload, + timeout=timeout, + stream=False + ) + + logger.debug(f"[{request_id}] Response status code: {response.status_code}") + + # If a successful answer is received, we process it + if response.status_code == 200: + one_min_response = response.json() + else: + # For any errors, we immediately return the answer + error_msg = "Unknown error" + try: + error_data = response.json() + if "error" in error_data: + error_msg = error_data["error"] + except: + pass + + if response.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + else: + return ( + jsonify({"error": error_msg}), + response.status_code, + ) + + except Exception as e: + logger.error(f"[{request_id}] Exception during API request: {str(e)}") + return jsonify({"error": f"API request failed: {str(e)}"}), 500 + + try: + # We get all the URL images if they are available + image_urls = [] + + # Check if the response of an array of URL images + result_object = one_min_response.get("aiRecord", {}).get("aiRecordDetail", {}).get("resultObject", []) + + if isinstance(result_object, list) and result_object: + image_urls = result_object + elif result_object and isinstance(result_object, str): + image_urls = [result_object] + + # If the URL is not found, we will try other extracts + if not image_urls: + if "resultObject" in one_min_response: + if isinstance(one_min_response["resultObject"], list): + image_urls = one_min_response["resultObject"] + else: + image_urls = [one_min_response["resultObject"]] + + if not image_urls: + logger.error( + f"[{request_id}] Could not extract image URLs from API response: {json.dumps(one_min_response)[:500]}" + ) + return ( + jsonify({"error": "Could not extract image URLs from API response"}), + 500, + ) + + logger.debug( + f"[{request_id}] Successfully generated {len(image_urls)} images" + ) + + # We save the parameters of the image generation in Memcache for subsequent use in variations + if model in ["midjourney", "midjourney_6_1"]: + try: + # We save the parameters for each generated image + for url in image_urls: + if url: + # We extract ID images from the URL + image_id_match = re.search(r'images/(\d+_\d+_\d+_\d+_\d+_\d+|\w+\d+)\.png', url) + if image_id_match: + image_id = image_id_match.group(1) + logger.info(f"[{request_id}] 
Extracted image_id from URL: {image_id}")
+
+                            # Save only the parameters we need
+                            gen_params = {
+                                "mode": payload["promptObject"].get("mode", "fast"),
+                                "aspect_width": payload["promptObject"].get("aspect_width", 1),
+                                "aspect_height": payload["promptObject"].get("aspect_height", 1),
+                                "isNiji6": payload["promptObject"].get("isNiji6", False),
+                                "maintainModeration": payload["promptObject"].get("maintainModeration", True)
+                            }
+
+                            gen_params_key = f"gen_params:{image_id}"
+                            # Use the updated version of safe_memcached_operation
+                            safe_memcached_operation('set', gen_params_key, gen_params, expiry=3600*24*7)  # store for 7 days
+                            logger.info(f"[{request_id}] Saved generation parameters for image {image_id}: {gen_params}")
+
+                            # Verify that the parameters were actually saved
+                            if gen_params_key in MEMORY_STORAGE:
+                                logger.info(f"[{request_id}] Verified saved directly in MEMORY_STORAGE: {MEMORY_STORAGE[gen_params_key]}")
+            except Exception as e:
+                logger.error(f"[{request_id}] Error saving generation parameters: {str(e)}")
+
+        # Build full URLs for all images
+        full_image_urls = []
+        asset_host = "https://asset.1min.ai"
+
+        for url in image_urls:
+            if not url:
+                continue
+
+            # Check whether the URL is already a full URL
+            if not url.startswith("http"):
+                # If the image path begins with /, do not add another /
+                if url.startswith("/"):
+                    full_url = f"{asset_host}{url}"
+                else:
+                    full_url = f"{asset_host}/{url}"
+            else:
+                full_url = url
+
+            full_image_urls.append(full_url)
+
+        # Build a response in the OpenAI format, with commands for variations
+        openai_data = []
+        for i, url in enumerate(full_image_urls):
+            # Create a short identifier for the image
+            image_id = str(uuid.uuid4())[:8]
+
+            # Add variation commands only if the model supports variations
+            if model in IMAGE_VARIATION_MODELS:
+                variation_commands = {
+                    "url": url,
+                    "revised_prompt": prompt,
+                    "variation_commands": {
+                        "variation": f"/v{i + 1} {url}",  # command to create variation number i+1
+                    }
+                }
+                openai_data.append(variation_commands)
+            else:
+                openai_data.append({"url": url, "revised_prompt": prompt})
+
+        openai_response = {
+            "created": int(time.time()),
+            "data": openai_data,
+        }
+
+        # For compatibility with the text answer format, add structured_output
+        structured_output = {"type": "image", "image_urls": full_image_urls}
+
+        # Build markdown text with variation buttons
+        if len(full_image_urls) == 1:
+            text_response = f"![Image]({full_image_urls[0]}) `[_V1_]`"
+            # Add a hint about creating variations
+            text_response += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** and send it (paste) in the next **prompt**"
+        else:
+            # Build text with images and variation buttons, one per line
+            image_lines = []
+
+            for i, url in enumerate(full_image_urls):
+                image_lines.append(f"![Image {i + 1}]({url}) `[_V{i + 1}_]`")
+
+            # Join the lines with newlines between them
+            text_response = "\n".join(image_lines)
+
+            # Add a hint about creating variations
+            text_response += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** - **[_V4_]** and send it (paste) in the next **prompt**"
+
+        openai_response["choices"] = [
+            {
+                "message": {
+                    "role": "assistant",
+                    "content": text_response,
+                    "structured_output": structured_output
+                },
+                "index": 0,
+                "finish_reason": "stop"
+            }
+        ]
+
+        logger.info(f"[{request_id}] Returning {len(openai_data)} image URLs to client")
+        response = make_response(jsonify(openai_response))
+        set_response_headers(response)
+        return response, 200
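+        # Illustrative sketch of the record saved above via safe_memcached_operation
+        # (key/value shapes taken from this code; the values are example assumptions):
+        #   key:   "gen_params:<image_id>"
+        #   value: {"mode": "fast", "aspect_width": 16, "aspect_height": 9,
+        #           "isNiji6": False, "maintainModeration": True}
+        # image_variations() can later read this key to reuse the original
+        # generation settings when a /v1../v4 variation command arrives.
+    except 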
Exception as e: + logger.error( + f"[{request_id}] Error processing image generation response: {str(e)}" + ) + return jsonify({"error": str(e)}), 500 + except Exception as e: + logger.error( + f"[{request_id}] Exception during image generation request: {str(e)}" + ) + return jsonify({"error": str(e)}), 500 + + +@app.route("/v1/images/variations", methods=["POST", "OPTIONS"]) +@limiter.limit("60 per minute") +@cross_origin() +def image_variations(): + if request.method == "OPTIONS": + return handle_options_request() + + # Create a unique ID for request + request_id = str(uuid.uuid4()) + logger.debug(f"[{request_id}] Processing image variation request") + + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + logger.error(f"[{request_id}] Invalid Authentication") + return ERROR_HANDLER(1021) + api_key = auth_header.split(" ")[1] + + # We check whether a request has come with the REQUEST_ID parameter (redirection from/V1/Chat/Complets) + if 'request_id' in request.args: + # We get data on variation from storages (Memcache or Memory_storage) + redirect_request_id = request.args.get('request_id') + variation_key = f"variation:{redirect_request_id}" + logger.info(f"[{request_id}] Looking for variation data with key: {variation_key}") + + variation_data_json = safe_memcached_operation('get', variation_key) + + if variation_data_json: + logger.info(f"[{request_id}] Found variation data: {variation_data_json}") + try: + if isinstance(variation_data_json, str): + variation_data = json.loads(variation_data_json) + elif isinstance(variation_data_json, bytes): + variation_data = json.loads(variation_data_json.decode('utf-8')) + else: + variation_data = variation_data_json + + # We get the way to the temporary file, model and number of variations + temp_file_path = variation_data.get("temp_file") + model = variation_data.get("model") + n = variation_data.get("n", 1) + # We get a relative path from the data if it was preserved + image_path = variation_data.get("image_path") + + logger.debug( + f"[{request_id}] Retrieved variation data from memcached: model={model}, n={n}, temp_file={temp_file_path}") + if image_path: + logger.debug(f"[{request_id}] Retrieved image path from memcached: {image_path}") + + # We check that the file exists + file_exists = os.path.exists(temp_file_path) + logger.info(f"[{request_id}] Temporary file exists: {file_exists}, path: {temp_file_path}") + + if file_exists: + # We download the file and process directly + try: + with open(temp_file_path, 'rb') as f: + file_data = f.read() + + file_size = len(file_data) + logger.info(f"[{request_id}] Read temporary file, size: {file_size} bytes") + + # Create a temporary file for processing a request + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png") + temp_file.write(file_data) + temp_file.close() + logger.info(f"[{request_id}] Created new temporary file: {temp_file.name}") + + # Create a file object for the Image_variations route + from io import BytesIO + file_data_io = BytesIO(file_data) + + # We register the file in Request.files via Workraund + from werkzeug.datastructures import FileStorage + file_storage = FileStorage( + stream=file_data_io, + filename="variation.png", + content_type="image/png", + ) + + logger.info(f"[{request_id}] Created FileStorage object for image") + + # We process a request with a new temporary file + request.files = {"image": file_storage} + logger.info(f"[{request_id}] Added file to request.files") + + # Create a form with 
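the necessary parameters below; first, a hedged sketch of the
+                        # request state rebuilt above (names from this code, shapes assumed):
+                        #   request.files == {"image": FileStorage(filename="variation.png",
+                        #                                          content_type="image/png")}
+                        #   request.form  == MultiDict([("model", model), ("n", str(n)), ...])
+                        # so the redirected request now looks like a normal multipart upload.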
+                        form_data = [("model", model), ("n", str(n))]
+
+                        # If there is a relative path, add it to the form
+                        if image_path:
+                            form_data.append(("image_path", image_path))
+                            logger.info(f"[{request_id}] Added image_path to form_data: {image_path}")
+
+                        request.form = MultiDict(form_data)
+                        logger.info(f"[{request_id}] Set request.form with data: {form_data}")
+
+                        logger.info(f"[{request_id}] Using file from memcached for image variations")
+
+                        # Delete the original temporary file
+                        try:
+                            os.unlink(temp_file_path)
+                            logger.debug(f"[{request_id}] Deleted original temporary file: {temp_file_path}")
+                        except Exception as e:
+                            logger.warning(f"[{request_id}] Failed to delete original temporary file: {str(e)}")
+
+                        # Use the original temporary file instead of creating a new one
+                        # to avoid problems with closing the stream
+                    except Exception as e:
+                        logger.error(f"[{request_id}] Error processing file from memcached: {str(e)}")
+                        return jsonify({"error": f"Error processing variation request: {str(e)}"}), 500
+                else:
+                    logger.error(f"[{request_id}] Temporary file not found: {temp_file_path}")
+                    return jsonify({"error": "Image file not found"}), 400
+            except Exception as e:
+                logger.error(f"[{request_id}] Error processing variation data: {str(e)}")
+                return jsonify({"error": f"Error processing variation request: {str(e)}"}), 500
+        else:
+            logger.error(f"[{request_id}] No variation data found in memcached with key: {variation_key}")
+            return jsonify({"error": "No variation data found"}), 400
+
+    # Get the image file
+    if "image" not in request.files:
+        logger.error(f"[{request_id}] No image file provided")
+        return jsonify({"error": "No image file provided"}), 400
+
+    image_file = request.files["image"]
+    original_model = request.form.get("model", "dall-e-2").strip()
+    n = int(request.form.get("n", 1))
+    size = request.form.get("size", "1024x1024")
+    prompt_text = request.form.get("prompt", "")  # extract the prompt field from the request, if present
+    # mode = request.form.get("mode", "relax")  # get the mode from the request
+
+    # Check whether a relative image path was passed in the form data
+    relative_image_path = request.form.get("image_path")
+    if relative_image_path:
+        logger.debug(f"[{request_id}] Using relative image path from form: {relative_image_path}")
+
+    logger.debug(f"[{request_id}] Original model requested: {original_model} for image variations")
+
+    # Determine the model order for fallback
+    fallback_models = ["midjourney_6_1", "midjourney", "clipdrop", "dall-e-2"]
+
+    # If the requested model supports variations, try it first
+    if original_model in IMAGE_VARIATION_MODELS:
+        # Start with the requested model, then try the others, excluding the one already requested
+        models_to_try = [original_model] + [m for m in fallback_models if m != original_model]
+    else:
+        # If the requested model does not support variations, start with the
+        # fallback models (an ordering sketch follows below)
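+        # Illustrative ordering (derived from the two branches above; the model
+        # name is an example): with original_model == "midjourney", models_to_try
+        # becomes ["midjourney", "midjourney_6_1", "clipdrop", "dall-e-2"];
+        # an unsupported model falls straight through to the fallback list.
+        logger.warning(
+            f"[{request_id}] Model {original_model} does not support image variations. 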
Will try fallback models") + models_to_try = fallback_models + + # We save a temporary file for multiple use + try: + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png") + image_file.save(temp_file.name) + temp_file.close() + except Exception as e: + logger.error(f"[{request_id}] Failed to save temporary file: {str(e)}") + return jsonify({"error": "Failed to process image file"}), 500 + + # Create a session to download the image + session = create_session() + headers = {"API-KEY": api_key} + + # We extract the ratio of the parties from the prompt field if it is + aspect_width = 1 + aspect_height = 1 + if "--ar" in prompt_text: + ar_match = re.search(r'--ar\s+(\d+):(\d+)', prompt_text) + if ar_match: + aspect_width = int(ar_match.group(1)) + aspect_height = int(ar_match.group(2)) + logger.debug(f"[{request_id}] Extracted aspect ratio: {aspect_width}:{aspect_height}") + + # Initialize the variable for variations in front of the cycle + variation_urls = [] + current_model = None + + # We try each model in turn + for model in models_to_try: + logger.info(f"[{request_id}] Trying model: {model} for image variations") + current_model = model + + try: + # Special processing for Dall-E 2 + if model == "dall-e-2": + # For Dall-E 2, you need to use a special Openai and direct file transfer + logger.debug(f"[{request_id}] Special handling for DALL-E 2 variations") + + # Open the image file and create a request + with open(temp_file.name, 'rb') as img_file: + # Openai expects a file directly to Form-Data + dalle_files = { + 'image': (os.path.basename(temp_file.name), img_file, 'image/png') + } + + # Request parameters + dalle_form_data = { + 'n': n, + 'size': size, + 'model': 'dall-e-2' + } + + # We create a request for variation directly to Openai API + try: + # Try to use a direct connection to Openai if available + openai_api_key = os.environ.get("OPENAI_API_KEY") + if openai_api_key: + openai_headers = {"Authorization": f"Bearer {openai_api_key}"} + openai_url = "https://api.openai.com/v1/images/variations" + + logger.debug(f"[{request_id}] Trying direct OpenAI API for DALL-E 2 variations") + variation_response = requests.post( + openai_url, + files=dalle_files, + data=dalle_form_data, + headers=openai_headers, + timeout=300 + ) + + if variation_response.status_code == 200: + logger.debug(f"[{request_id}] OpenAI API variation successful") + variation_data = variation_response.json() + + # We extract the URL from the answer + if "data" in variation_data and isinstance(variation_data["data"], list): + for item in variation_data["data"]: + if "url" in item: + variation_urls.append(item["url"]) + + if variation_urls: + logger.info( + f"[{request_id}] Successfully created {len(variation_urls)} variations with DALL-E 2 via OpenAI API") + # We form an answer in Openai API format + response_data = { + "created": int(time.time()), + "data": [{"url": url} for url in variation_urls] + } + return jsonify(response_data) + else: + logger.error( + f"[{request_id}] OpenAI API variation failed: {variation_response.status_code}, {variation_response.text}") + except Exception as e: + logger.error(f"[{request_id}] Error trying direct OpenAI API: {str(e)}") + + # If the direct request to Openai failed, we try through 1min.ai API + try: + # We reject the file because it could be read in the previous request + img_file.seek(0) + + # We draw a request through our own and 1min.ai and dall-e 2 + onemin_url = "https://api.1min.ai/api/features/images/variations" + + logger.debug(f"[{request_id}] Trying 
1min.ai API for DALL-E 2 variations") + dalle_onemin_headers = {"API-KEY": api_key} + variation_response = requests.post( + onemin_url, + files=dalle_files, + data=dalle_form_data, + headers=dalle_onemin_headers, + timeout=300 + ) + + if variation_response.status_code == 200: + logger.debug(f"[{request_id}] 1min.ai API variation successful") + variation_data = variation_response.json() + + # We extract the URL from the answer + if "data" in variation_data and isinstance(variation_data["data"], list): + for item in variation_data["data"]: + if "url" in item: + variation_urls.append(item["url"]) + + if variation_urls: + logger.info( + f"[{request_id}] Successfully created {len(variation_urls)} variations with DALL-E 2 via 1min.ai API") + # We form an answer in Openai API format + response_data = { + "created": int(time.time()), + "data": [{"url": url} for url in variation_urls] + } + return jsonify(response_data) + else: + logger.error( + f"[{request_id}] 1min.ai API variation failed: {variation_response.status_code}, {variation_response.text}") + except Exception as e: + logger.error(f"[{request_id}] Error trying 1min.ai API: {str(e)}") + + # If you could not create a variation with Dall-E 2, we continue with other models + logger.warning(f"[{request_id}] Failed to create variations with DALL-E 2, trying next model") + continue + + # For other models, we use standard logic + # Image loading in 1min.ai + with open(temp_file.name, 'rb') as img_file: + files = {"asset": (os.path.basename(temp_file.name), img_file, "image/png")} + + asset_response = session.post( + ONE_MIN_ASSET_URL, files=files, headers=headers + ) + logger.debug( + f"[{request_id}] Image upload response status code: {asset_response.status_code}" + ) + + if asset_response.status_code != 200: + logger.error( + f"[{request_id}] Failed to upload image: {asset_response.status_code} - {asset_response.text}" + ) + continue # We try the next model + + # Extract an ID of a loaded image and a full URL + asset_data = asset_response.json() + logger.debug(f"[{request_id}] Asset upload response: {asset_data}") + + # We get a URL or ID image + image_id = None + image_url = None + image_location = None + + # We are looking for ID in different places of the response structure + if "id" in asset_data: + image_id = asset_data["id"] + elif "fileContent" in asset_data and "id" in asset_data["fileContent"]: + image_id = asset_data["fileContent"]["id"] + elif "fileContent" in asset_data and "uuid" in asset_data["fileContent"]: + image_id = asset_data["fileContent"]["uuid"] + + # We are looking for an absolute URL (location) for image + if "asset" in asset_data and "location" in asset_data["asset"]: + image_location = asset_data["asset"]["location"] + # Extract a relative path if the URL contains the domain + if image_location and "asset.1min.ai/" in image_location: + image_location = image_location.split('asset.1min.ai/', 1)[1] + # Remove the initial slash if necessary + if image_location and image_location.startswith('/'): + image_location = image_location[1:] + logger.debug(f"[{request_id}] Using relative path for image location: {image_location}") + + # If there is a Path, we use it as a URL image + if "fileContent" in asset_data and "path" in asset_data["fileContent"]: + image_url = asset_data["fileContent"]["path"] + # Extract a relative path if the URL contains the domain + if "asset.1min.ai/" in image_url: + image_url = image_url.split('asset.1min.ai/', 1)[1] + # Remove the initial slash if necessary + if image_url.startswith('/'): + 
image_url = image_url[1:] + logger.debug(f"[{request_id}] Using relative path for image: {image_url}") + + if not (image_id or image_url or image_location): + logger.error(f"[{request_id}] Failed to extract image information from response") + continue # We try the next model + + # We form Payload for image variation + # We determine which model to use + if model.startswith("midjourney"): + # Check if the URL contains the Asset.1Min.Ai domain + if image_url and "asset.1min.ai/" in image_url: + # We extract only the relative path from the URL + relative_image_url = image_url.split('asset.1min.ai/', 1)[1] + # Remove the initial slash if it is + if relative_image_url.startswith('/'): + relative_image_url = relative_image_url[1:] + logger.info(f"[{request_id}] Extracted relative URL for Midjourney: {relative_image_url}") + else: + relative_image_url = image_url if image_url else image_location + if relative_image_url and relative_image_url.startswith('/'): + relative_image_url = relative_image_url[1:] + + # For Midjourney + payload = { + "type": "IMAGE_VARIATOR", + "model": model, + "promptObject": { + "imageUrl": relative_image_url if relative_image_url else image_url if image_url else image_location, + "mode": mode or "fast", + "n": 4, + "isNiji6": False, + "aspect_width": aspect_width or 1, + "aspect_height": aspect_height or 1, + "maintainModeration": True + } + } + elif model == "dall-e-2": + # For Dall-E 2 + payload = { + "type": "IMAGE_VARIATOR", + "model": "dall-e-2", + "promptObject": { + "imageUrl": relative_image_url if relative_image_url else image_url if image_url else image_location, + "n": 1, + "size": "1024x1024" + } + } + elif model == "clipdrop": + # For Clipdrop (Stable Diffusion) + payload = { + "type": "IMAGE_VARIATOR", + "model": "clipdrop", + "promptObject": { + "imageUrl": relative_image_url if relative_image_url else image_url if image_url else image_location, + } + } + else: + # For all other models, we use minimal parameters + payload = { + "type": "IMAGE_VARIATOR", + "model": model, + "promptObject": { + "imageUrl": relative_image_url if relative_image_url else image_url if image_url else image_location, + "n": int(n) + } + } + + # Remove the initial slash in Imageurl if it is + if "imageUrl" in payload["promptObject"] and payload["promptObject"]["imageUrl"] and isinstance( + payload["promptObject"]["imageUrl"], str) and payload["promptObject"]["imageUrl"].startswith( + '/'): + payload["promptObject"]["imageUrl"] = payload["promptObject"]["imageUrl"][1:] + logger.debug( + f"[{request_id}] Removed leading slash from imageUrl: {payload['promptObject']['imageUrl']}") + + # For VIP users, add Credit to the request + if api_key.startswith("vip-"): + payload["credits"] = 90000 # Standard number of loans for VIP + + # Detailed Payload logistics for debugging + logger.info(f"[{request_id}] {model} variation payload: {json.dumps(payload, indent=2)}") + + # Using Timeout for all models (10 minutes) + timeout = MIDJOURNEY_TIMEOUT + + logger.debug(f"[{request_id}] Sending variation request to {ONE_MIN_API_URL}") + + # We send a request to create a variation + variation_response = api_request( + "POST", + f"{ONE_MIN_API_URL}", + headers={"API-KEY": api_key, "Content-Type": "application/json"}, + json=payload, + timeout=timeout + ) + + if variation_response.status_code != 200: + # We process the 504 error for Midjourney in a special way + if variation_response.status_code == 504 and model.startswith("midjourney"): + logger.error( + f"[{request_id}] Received a 504 Gateway Timeout 
for Midjourney variations. Returning the error to the client.") + return ( + jsonify( + {"error": "Gateway Timeout (504) occurred while processing image variation request."}), + 504, + ) + # For other errors, we continue to try the next model + logger.error( + f"[{request_id}] Variation request with model {model} failed: {variation_response.status_code} - {variation_response.text}") + continue + + # We process the answer and form the result + variation_data = variation_response.json() + # Add a detailed log for myidjourney model + if model.startswith("midjourney"): + logger.info(f"[{request_id}] Full Midjourney variation response: {json.dumps(variation_data, indent=2)}") + logger.debug(f"[{request_id}] Variation response: {variation_data}") + + # We extract the URL variations - initialize an empty array before searching + variation_urls = [] + # We are trying to find URL variations in the answer - various structures for different models + if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]: + record_detail = variation_data["aiRecord"]["aiRecordDetail"] + if "resultObject" in record_detail: + result = record_detail["resultObject"] + if isinstance(result, list): + variation_urls = result + elif isinstance(result, str): + variation_urls = [result] + + # An alternative search path + if not variation_urls and "resultObject" in variation_data: + result = variation_data["resultObject"] + if isinstance(result, list): + variation_urls = result + elif isinstance(result, str): + variation_urls = [result] + + # Search in Data.URL for Dall-E 2 + if not variation_urls and "data" in variation_data and isinstance(variation_data["data"], list): + for item in variation_data["data"]: + if "url" in item: + variation_urls.append(item["url"]) + + if not variation_urls: + logger.error(f"[{request_id}] No variation URLs found in response with model {model}") + continue # We try the next model + + # Successfully received variations, we leave the cycle + logger.info(f"[{request_id}] Successfully generated variations with model {model}") + break + + except Exception as e: + logger.error(f"[{request_id}] Exception during variation request with model {model}: {str(e)}") + continue # We try the next model + + # Clean the temporary file + try: + os.unlink(temp_file.name) + except: + pass + + # We check if you managed to get variations from any of the models + if not variation_urls: + session.close() + return jsonify({"error": "Failed to create image variations with any available model"}), 500 + + # We form complete URL for variations + full_variation_urls = [] + asset_host = "https://asset.1min.ai" + + for url in variation_urls: + if not url: + continue + + # We save the relative path for the API, but create a full URL for display + relative_url = url + # If the URL contains a domain, we extract a relative path + if "asset.1min.ai/" in url: + relative_url = url.split('asset.1min.ai/', 1)[1] + # Remove the initial slash if it is + if relative_url.startswith('/'): + relative_url = relative_url[1:] + # If the URL is already without a domain, but starts with the slashus, we remove the slash + elif url.startswith('/'): + relative_url = url[1:] + + # Create a full URL to display the user + if not url.startswith("http"): + if url.startswith("/"): + full_url = f"{asset_host}{url}" + else: + full_url = f"{asset_host}/{url}" + else: + full_url = url + + # We keep the relative path and full URL + full_variation_urls.append({ + "relative_path": relative_url, + "full_url": full_url + }) + + # We form 
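the final response from the variation URLs extracted above. As an
+    # illustrative sketch (shapes assumed from the three probes), the API may
+    # carry the URLs in any of these forms:
+    #   {"aiRecord": {"aiRecordDetail": {"resultObject": ["https://..."]}}}
+    #   {"resultObject": "https://..."}
+    #   {"data": [{"url": "https://..."}]}
+    # Whichever probe matches first fills variation_urls; from those we form 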
an answer in Openai format + openai_data = [] + for url_data in full_variation_urls: + # We use the relative path for the API + openai_data.append({"url": url_data["relative_path"]}) + + openai_response = { + "created": int(time.time()), + "data": openai_data, + } + + # Add the text with variation buttons for Markdown Object + markdown_text = "" + if len(full_variation_urls) == 1: + # We use the full URL to display + markdown_text = f"![Variation]({full_variation_urls[0]['full_url']}) `[_V1_]`" + # Add a hint to create variations + markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** and send it (paste) in the next **prompt**" + else: + # We form a text with images and buttons of variations on one line + image_lines = [] + + for i, url_data in enumerate(full_variation_urls): + # We use the full URL to display + image_lines.append(f"![Variation {i + 1}]({url_data['full_url']}) `[_V{i + 1}_]`") + + # Combine lines with a new line between them + markdown_text = "\n".join(image_lines) + # Add a hint to create variations + markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** - **[_V4_]** and send it (paste) in the next **prompt**" + + openai_response["choices"] = [ + { + "message": { + "role": "assistant", + "content": markdown_text + }, + "index": 0, + "finish_reason": "stop" + } + ] + + session.close() + logger.info( + f"[{request_id}] Successfully generated {len(openai_data)} image variations using model {current_model}") + return jsonify(openai_response), 200 + + +@app.route("/v1/assistants", methods=["POST", "OPTIONS"]) +@limiter.limit("60 per minute") +def create_assistant(): + if request.method == "OPTIONS": + return handle_options_request() + + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + logger.error("Invalid Authentication") + return ERROR_HANDLER(1021) + + api_key = auth_header.split(" ")[1] + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + request_data = request.json + name = request_data.get("name", "PDF Assistant") + instructions = request_data.get("instructions", "") + model = request_data.get("model", "gpt-4o-mini") + file_ids = request_data.get("file_ids", []) + + # Creating a conversation with PDF in 1min.ai + payload = { + "title": name, + "type": "CHAT_WITH_PDF", + "model": model, + "fileList": file_ids, + } + + response = requests.post( + ONE_MIN_CONVERSATION_API_URL, json=payload, headers=headers + ) + + if response.status_code != 200: + if response.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + return ( + jsonify({"error": response.json().get("error", "Unknown error")}), + response.status_code, + ) + + one_min_response = response.json() + + try: + conversation_id = one_min_response.get("id") + + openai_response = { + "id": f"asst_{conversation_id}", + "object": "assistant", + "created_at": int(time.time()), + "name": name, + "description": None, + "model": model, + "instructions": instructions, + "tools": [], + "file_ids": file_ids, + "metadata": {}, + } + + response = make_response(jsonify(openai_response)) + set_response_headers(response) + return response, 200 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +def handle_options_request(): + response = make_response() + response.headers.add("Access-Control-Allow-Origin", "*") + response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization") + response.headers.add("Access-Control-Allow-Methods", "POST, 
OPTIONS") + return response, 204 + + +def transform_response(one_min_response, request_data, prompt_token): + try: + # Output of the response structure for debugging + logger.debug(f"Response structure: {json.dumps(one_min_response)[:200]}...") + + # We get an answer from the appropriate place to json + result_text = ( + one_min_response.get("aiRecord", {}) + .get("aiRecordDetail", {}) + .get("resultObject", [""])[0] + ) + + if not result_text: + # Alternative ways to extract an answer + if "resultObject" in one_min_response: + result_text = ( + one_min_response["resultObject"][0] + if isinstance(one_min_response["resultObject"], list) + else one_min_response["resultObject"] + ) + elif "result" in one_min_response: + result_text = one_min_response["result"] + else: + # If you have not found an answer along the well -known paths, we return the error + logger.error(f"Cannot extract response text from API result") + result_text = "Error: Could not extract response from API" + + completion_token = calculate_token(result_text) + logger.debug( + f"Finished processing Non-Streaming response. Completion tokens: {str(completion_token)}" + ) + logger.debug(f"Total tokens: {str(completion_token + prompt_token)}") + + return { + "id": f"chatcmpl-{uuid.uuid4()}", + "object": "chat.completion", + "created": int(time.time()), + "model": request_data.get("model", "mistral-nemo").strip(), + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": result_text, + }, + "finish_reason": "stop", + } + ], + "usage": { + "prompt_tokens": prompt_token, + "completion_tokens": completion_token, + "total_tokens": prompt_token + completion_token, + }, + } + except Exception as e: + logger.error(f"Error in transform_response: {str(e)}") + # Return the error in the format compatible with Openai + return { + "id": f"chatcmpl-{uuid.uuid4()}", + "object": "chat.completion", + "created": int(time.time()), + "model": request_data.get("model", "mistral-nemo").strip(), + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": f"Error processing response: {str(e)}", + }, + "finish_reason": "stop", + } + ], + "usage": { + "prompt_tokens": prompt_token, + "completion_tokens": 0, + "total_tokens": prompt_token, + }, + } + + +def set_response_headers(response): + response.headers["Content-Type"] = "application/json" + response.headers["Access-Control-Allow-Origin"] = "*" # Corrected the hyphen in the title name + response.headers["X-Request-ID"] = str(uuid.uuid4()) + # Add more Cors headings + response.headers["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS" + response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-Type, Accept" + return response # Return the answer for the chain + + +def stream_response(response, request_data, model, prompt_tokens, session=None): + """ + Stream received from 1min.ai response in a format compatible with Openai API. 
+    """
+    all_chunks = ""
+
+    # Send the first fragment: the message role
+    first_chunk = {
+        "id": f"chatcmpl-{uuid.uuid4()}",
+        "object": "chat.completion.chunk",
+        "created": int(time.time()),
+        "model": model,
+        "choices": [
+            {
+                "index": 0,
+                "delta": {"role": "assistant"},
+                "finish_reason": None,
+            }
+        ],
+    }
+
+    yield f"data: {json.dumps(first_chunk)}\n\n"
+
+    # Simple implementation of content processing
+    for chunk in response.iter_content(chunk_size=1024):
+        finish_reason = None
+
+        return_chunk = {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion.chunk",
+            "created": int(time.time()),
+            "model": model,
+            "choices": [
+                {
+                    "index": 0,
+                    "delta": {
+                        "content": chunk.decode('utf-8')
+                    },
+                    "finish_reason": finish_reason
+                }
+            ]
+        }
+        all_chunks += chunk.decode('utf-8')
+        yield f"data: {json.dumps(return_chunk)}\n\n"
+
+    tokens = calculate_token(all_chunks)
+    logger.debug(f"Finished processing streaming response. Completion tokens: {str(tokens)}")
+    logger.debug(f"Total tokens: {str(tokens + prompt_tokens)}")
+
+    # Final chunk marking the end of the stream
+    final_chunk = {
+        "id": f"chatcmpl-{uuid.uuid4()}",
+        "object": "chat.completion.chunk",
+        "created": int(time.time()),
+        "model": model,
+        "choices": [
+            {
+                "index": 0,
+                "delta": {
+                    "content": ""
+                },
+                "finish_reason": "stop"
+            }
+        ],
+        "usage": {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": tokens,
+            "total_tokens": tokens + prompt_tokens
+        }
+    }
+    yield f"data: {json.dumps(final_chunk)}\n\n"
+    yield "data: [DONE]\n\n"
+
+
+def safe_temp_file(prefix, request_id=None):
+    """
+    Safely creates a temporary file and guarantees its deletion after use
+
+    Args:
+        prefix: Prefix for the file name
+        request_id: Request ID for logging
+
+    Returns:
+        str: Path to the temporary file
+    """
+    request_id = request_id or str(uuid.uuid4())[:8]
+    random_string = "".join(random.choices(string.ascii_letters + string.digits, k=10))
+    temp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "temp")
+
+    # Create the temporary directory if it does not exist
+    if not os.path.exists(temp_dir):
+        os.makedirs(temp_dir)
+
+    # Clean up old files (over 1 hour old)
+    try:
+        current_time = time.time()
+        for old_file in os.listdir(temp_dir):
+            file_path = os.path.join(temp_dir, old_file)
+            if os.path.isfile(file_path):
+                # If the file is older than 1 hour, delete it
+                if current_time - os.path.getmtime(file_path) > 3600:
+                    try:
+                        os.remove(file_path)
+                        logger.debug(
+                            f"[{request_id}] Removed old temp file: {file_path}"
+                        )
+                    except Exception as e:
+                        logger.warning(
+                            f"[{request_id}] Failed to remove old temp file {file_path}: {str(e)}"
+                        )
+    except Exception as e:
+        logger.warning(f"[{request_id}] Error while cleaning old temp files: {str(e)}")
+
+    # Create a new temporary file path
+    temp_file_path = os.path.join(temp_dir, f"{prefix}_{request_id}_{random_string}")
+    return temp_file_path
+
+
+def retry_image_upload(image_url, api_key, request_id=None):
+    """Uploads an image with retries; returns a direct link to it"""
+    request_id = request_id or str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Uploading image: {image_url}")
+
+    # Create a new session for this request
+    session = create_session()
+    temp_file_path = None
+
+    try:
+        # Load the image
+        if image_url.startswith(("http://", "https://")):
+            # Download from URL
+            logger.debug(f"[{request_id}] Fetching image from URL: {image_url}")
+            response = session.get(image_url, stream=True)
+            response.raise_for_status()
+            image_data = response.content
+        else:
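+            # Hedged assumption, implied by the split(",") below: image_url is
+            # a data URL of the form "data:image/png;base64,<payload>", so
+            # index [1] keeps only the base64 payload. Decoding 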
Base64 + logger.debug(f"[{request_id}] Decoding base64 image") + image_data = base64.b64decode(image_url.split(",")[1]) + + # Check the file size + if len(image_data) == 0: + logger.error(f"[{request_id}] Empty image data") + return None + + # Create a temporary file + temp_file_path = safe_temp_file("image", request_id) + + with open(temp_file_path, "wb") as f: + f.write(image_data) + + # Check that the file is not empty + if os.path.getsize(temp_file_path) == 0: + logger.error(f"[{request_id}] Empty image file created: {temp_file_path}") + return None + + # We load to the server + try: + with open(temp_file_path, "rb") as f: + upload_response = session.post( + ONE_MIN_ASSET_URL, + headers={"API-KEY": api_key}, + files={ + "asset": ( + os.path.basename(image_url), + f, + ( + "image/webp" + if image_url.endswith(".webp") + else "image/jpeg" + ), + ) + }, + ) + + if upload_response.status_code != 200: + logger.error( + f"[{request_id}] Upload failed with status {upload_response.status_code}: {upload_response.text}" + ) + return None + + # We get URL images + upload_data = upload_response.json() + if isinstance(upload_data, str): + try: + upload_data = json.loads(upload_data) + except: + logger.error( + f"[{request_id}] Failed to parse upload response: {upload_data}" + ) + return None + + logger.debug(f"[{request_id}] Upload response: {upload_data}") + + # We get the path to the file from FileContent + if ( + "fileContent" in upload_data + and "path" in upload_data["fileContent"] + ): + url = upload_data["fileContent"]["path"] + logger.info(f"[{request_id}] Image uploaded successfully: {url}") + return url + + logger.error(f"[{request_id}] No path found in upload response") + return None + + except Exception as e: + logger.error(f"[{request_id}] Exception during image upload: {str(e)}") + return None + + except Exception as e: + logger.error(f"[{request_id}] Exception during image processing: {str(e)}") + traceback.print_exc() + return None + finally: + # Close the session + session.close() + # We delete a temporary file + if temp_file_path and os.path.exists(temp_file_path): + try: + os.remove(temp_file_path) + logger.debug(f"[{request_id}] Removed temp file: {temp_file_path}") + except Exception as e: + logger.warning( + f"[{request_id}] Failed to remove temp file {temp_file_path}: {str(e)}" + ) + + +def create_session(): + """Creates a new session with optimal settings for APIs""" + session = requests.Session() + + # Setting up repeated attempts for all requests + retry_strategy = requests.packages.urllib3.util.retry.Retry( + total=3, + backoff_factor=1, + status_forcelist=[429, 500, 502, 503, 504], + allowed_methods=["HEAD", "GET", "POST", "PUT", "DELETE", "OPTIONS", "TRACE"], + ) + adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy) + session.mount("http://", adapter) + session.mount("https://", adapter) + + return session + + +def upload_document(file_data, file_name, api_key, request_id=None): + """ + Downloads the file/document to the server and returns its ID. 
+
+    Args:
+        file_data: Binary file contents
+        file_name: File name
+        api_key: User API key
+        request_id: Request ID for logging
+
+    Returns:
+        str: ID of the uploaded file, or None in case of error
+    """
+    session = create_session()
+    try:
+        # Determine the file extension
+        extension = os.path.splitext(file_name)[1].lower()
+        logger.info(f"[{request_id}] Uploading document: {file_name}")
+
+        # Dictionary of MIME types for different file extensions
+        mime_types = {
+            ".pdf": "application/pdf",
+            ".txt": "text/plain",
+            ".doc": "application/msword",
+            ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+            ".csv": "text/csv",
+            ".xls": "application/vnd.ms-excel",
+            ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+            ".json": "application/json",
+            ".md": "text/markdown",
+            ".html": "text/html",
+            ".htm": "text/html",
+            ".xml": "application/xml",
+            ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+            ".ppt": "application/vnd.ms-powerpoint",
+            ".rtf": "application/rtf",
+        }
+
+        # Get the MIME type from the dictionary, or use octet-stream by default
+        mime_type = mime_types.get(extension, "application/octet-stream")
+
+        # Determine the file type for special handling
+        file_type = None
+        if extension in [".doc"]:
+            file_type = "DOC"
+        elif extension in [".docx"]:
+            file_type = "DOCX"
+
+        # Upload the file to the server - add more detail to the logs
+        logger.info(
+            f"[{request_id}] Uploading file to 1min.ai: {file_name} ({mime_type}, {len(file_data)} bytes)"
+        )
+
+        headers = {"API-KEY": api_key}
+
+        # Special headers for DOC/DOCX
+        if file_type in ["DOC", "DOCX"]:
+            headers["X-File-Type"] = file_type
+
+        files = {"asset": (file_name, file_data, mime_type)}
+
+        upload_response = session.post(ONE_MIN_ASSET_URL, headers=headers, files=files)
+
+        if upload_response.status_code != 200:
+            logger.error(
+                f"[{request_id}] Document upload failed: {upload_response.status_code} - {upload_response.text}"
+            )
+            return None
+
+        # Detailed logging of the answer
+        try:
+            response_text = upload_response.text
+            logger.debug(
+                f"[{request_id}] Raw upload response: {response_text[:500]}..."
+            )
+
+            response_data = upload_response.json()
+            logger.debug(
+                f"[{request_id}] Upload response JSON: {json.dumps(response_data)[:500]}..."
+ ) + + file_id = None + if "id" in response_data: + file_id = response_data["id"] + logger.debug(f"[{request_id}] Found file ID at top level: {file_id}") + elif ( + "fileContent" in response_data and "id" in response_data["fileContent"] + ): + file_id = response_data["fileContent"]["id"] + logger.debug(f"[{request_id}] Found file ID in fileContent: {file_id}") + elif ( + "fileContent" in response_data and "uuid" in response_data["fileContent"] + ): + file_id = response_data["fileContent"]["uuid"] + logger.debug(f"[{request_id}] Found file ID (uuid) in fileContent: {file_id}") + else: + # We are trying to find ID in other places of response structure + if isinstance(response_data, dict): + # Recursive search for ID in the response structure + def find_id(obj, path="root"): + if isinstance(obj, dict): + if "id" in obj: + logger.debug( + f"[{request_id}] Found ID at path '{path}': {obj['id']}" + ) + return obj["id"] + if "uuid" in obj: + logger.debug( + f"[{request_id}] Found UUID at path '{path}': {obj['uuid']}" + ) + return obj["uuid"] + for k, v in obj.items(): + result = find_id(v, f"{path}.{k}") + if result: + return result + elif isinstance(obj, list): + for i, item in enumerate(obj): + result = find_id(item, f"{path}[{i}]") + if result: + return result + return None + + file_id = find_id(response_data) + + if not file_id: + logger.error( + f"[{request_id}] Could not find file ID in response: {json.dumps(response_data)}" + ) + return None + + logger.info( + f"[{request_id}] Document uploaded successfully. File ID: {file_id}" + ) + return file_id + except Exception as e: + logger.error(f"[{request_id}] Error parsing upload response: {str(e)}") + traceback.print_exc() + return None + except Exception as e: + logger.error(f"[{request_id}] Error uploading document: {str(e)}") + traceback.print_exc() + return None + finally: + session.close() + + +@app.route("/v1/files", methods=["POST"]) +@limiter.limit("60 per minute") +def upload_file(): + """ + File download route (analogue Openai Files API) + """ + request_id = str(uuid.uuid4())[:8] + logger.info(f"[{request_id}] Received file upload request") + + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + logger.error(f"[{request_id}] Invalid Authentication") + return ERROR_HANDLER(1021) + + api_key = auth_header.split(" ")[1] + + if "file" not in request.files: + logger.error(f"[{request_id}] No file part in request") + return jsonify({"error": "No file part"}), 400 + + file = request.files["file"] + if file.filename == "": + logger.error(f"[{request_id}] No selected file") + return jsonify({"error": "No selected file"}), 400 + + try: + # We save the file in memory + file_data = file.read() + file_name = file.filename + + # We download the file to the 1min.ai server + file_id = upload_document(file_data, file_name, api_key, request_id) + + if not file_id: + return jsonify({"error": "Failed to upload file"}), 500 + + # We save the file of the file in the user's session through Memcache, if it is available + if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None: + try: + user_key = f"user:{api_key}" + # We get the current user's current files or create a new list + user_files_json = safe_memcached_operation('get', user_key) + user_files = [] + + if user_files_json: + try: + if isinstance(user_files_json, str): + user_files = json.loads(user_files_json) + elif isinstance(user_files_json, bytes): + user_files = json.loads(user_files_json.decode('utf-8')) + except 
Exception as e:
+                        logger.error(f"[{request_id}] Error parsing user files from memcached: {str(e)}")
+                        user_files = []
+
+                # Add the new file
+                file_info = {
+                    "id": file_id,
+                    "filename": file_name,
+                    "uploaded_at": int(time.time())
+                }
+
+                # Check that a file with this ID is not already in the list
+                if not any(f.get("id") == file_id for f in user_files):
+                    user_files.append(file_info)
+
+                # Save the updated file list
+                safe_memcached_operation('set', user_key, json.dumps(user_files))
+                logger.info(f"[{request_id}] Saved file ID {file_id} for user in memcached")
+
+                # Add the user to the list of known users for the cleanup task
+                known_users_list_json = safe_memcached_operation('get', 'known_users_list')
+                known_users_list = []
+
+                if known_users_list_json:
+                    try:
+                        if isinstance(known_users_list_json, str):
+                            known_users_list = json.loads(known_users_list_json)
+                        elif isinstance(known_users_list_json, bytes):
+                            known_users_list = json.loads(known_users_list_json.decode('utf-8'))
+                    except Exception as e:
+                        logger.error(f"[{request_id}] Error parsing known users list: {str(e)}")
+
+                # Add the API key to the list of known users if it is not there yet
+                if api_key not in known_users_list:
+                    known_users_list.append(api_key)
+                    safe_memcached_operation('set', 'known_users_list', json.dumps(known_users_list))
+                    logger.debug(f"[{request_id}] Added user to known_users_list for cleanup")
+            except Exception as e:
+                logger.error(f"[{request_id}] Error saving file info to memcached: {str(e)}")
+
+        # Build the response in the OpenAI API format
+        response_data = {
+            "id": file_id,
+            "object": "file",
+            "bytes": len(file_data),
+            "created_at": int(time.time()),
+            "filename": file_name,
+            "purpose": request.form.get("purpose", "assistants")
+        }
+
+        return jsonify(response_data)
+    except Exception as e:
+        logger.error(f"[{request_id}] Exception during file upload: {str(e)}")
+        return jsonify({"error": str(e)}), 500
+
+
+def emulate_stream_response(full_content, request_data, model, prompt_tokens):
+    """
+    Emulates a streaming response for cases when the API does not support streaming.
+
+    Args:
+        full_content: full text of the response
+        request_data: request data
+        model: model name
+        prompt_tokens: number of tokens in the prompt
+
+    Yields:
+        str: lines for streaming
+    """
+    # Split the response into chunks of ~5 words each
+    words = full_content.split()
+    chunks = [" ".join(words[i: i + 5]) for i in range(0, len(words), 5)]
+
+    for chunk in chunks:
+        return_chunk = {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion.chunk",
+            "created": int(time.time()),
+            "model": model,
+            "choices": [
+                {"index": 0, "delta": {"content": chunk}, "finish_reason": None}
+            ],
+        }
+
+        yield f"data: {json.dumps(return_chunk)}\n\n"
+        time.sleep(0.05)  # Small delay to emulate streaming
+
+    # Count the completion tokens
+    tokens = calculate_token(full_content)
+
+    # Final chunk carrying the usage information
+    final_chunk = {
+        "id": f"chatcmpl-{uuid.uuid4()}",
+        "object": "chat.completion.chunk",
+        "created": int(time.time()),
+        "model": model,
+        "choices": [{"index": 0, "delta": {"content": ""}, "finish_reason": "stop"}],
+        "usage": {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": tokens,
+            "total_tokens": tokens + prompt_tokens,
+        },
+    }
+
+    yield f"data: {json.dumps(final_chunk)}\n\n"
+    yield "data: [DONE]\n\n"
+
+
+# Helper for performing an HTTP request to the API
+def api_request(req_method, url, headers=None,
+                requester_ip=None, data=None,
+                files=None, stream=False,
+                timeout=None, json=None, **kwargs):
+    """Performs an HTTP request to the API with URL normalization and error handling"""
+    # Note: the `json` parameter shadows the json module inside this function;
+    # the module itself is not used in this body.
+    req_url = url.strip()
+    logger.debug(f"API request URL: {req_url}")
+
+    # Request parameters
+    req_params = {}
+    if headers:
+        req_params["headers"] = headers
+    if data:
+        req_params["data"] = data
+    if files:
+        req_params["files"] = files
+    if stream:
+        req_params["stream"] = stream
+    if json:
+        req_params["json"] = json
+
+    # Add any other parameters
+    req_params.update(kwargs)
+
+    # Check whether the request is an image operation
+    is_image_operation = False
+    if json and isinstance(json, dict):
+        operation_type = json.get("type", "")
+        if operation_type in [IMAGE_GENERATOR, IMAGE_VARIATOR]:
+            is_image_operation = True
+            logger.debug(f"Detected image operation: {operation_type}, using extended timeout")
+
+    # Use an extended timeout for image operations
+    if is_image_operation:
+        req_params["timeout"] = timeout or MIDJOURNEY_TIMEOUT
+        logger.debug(f"Using extended timeout for image operation: {MIDJOURNEY_TIMEOUT}s")
+    else:
+        req_params["timeout"] = timeout or DEFAULT_TIMEOUT
+
+    # Perform the request
+    try:
+        response = requests.request(req_method, req_url, **req_params)
+        return response
+    except Exception as e:
+        logger.error(f"API request error: {str(e)}")
+        raise
+
+
+@app.route("/v1/audio/transcriptions", methods=["POST", "OPTIONS"])
+@limiter.limit("60 per minute")
+def audio_transcriptions():
+    """
+    Route for converting speech to text (analogous to the OpenAI Whisper API)
+    """
+    if request.method == "OPTIONS":
+        return handle_options_request()
+
+    request_id = str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Received request: /v1/audio/transcriptions")
+
+    auth_header = request.headers.get("Authorization")
+    if not auth_header or not auth_header.startswith("Bearer "):
+        logger.error(f"[{request_id}] Invalid Authentication")
+        return ERROR_HANDLER(1021)
+
+    api_key = auth_header.split(" ")[1]
+
+    # Check that an audio file was provided
+    if "file" not in request.files:
+        logger.error(f"[{request_id}] No audio file provided")
+        return jsonify({"error": "No audio file provided"}), 400
+
+    audio_file = request.files["file"]
+    model = request.form.get("model", "whisper-1")
+    response_format = request.form.get("response_format", "text")
+    language = request.form.get("language", None)
+    temperature = request.form.get("temperature", 0)
+
+    logger.info(f"[{request_id}] Processing audio transcription with model {model}")
+
+    try:
+        # Create a new session for the audio upload
+        session = create_session()
+        headers = {"API-KEY": api_key}
+
+        # Upload the audio to 1min.ai (the MIME type is assumed to be audio/mpeg)
+        files = {"asset": (audio_file.filename, audio_file, "audio/mpeg")}
+
+        try:
+            asset_response = session.post(
+                ONE_MIN_ASSET_URL, files=files, headers=headers
+            )
+            logger.debug(
+                f"[{request_id}] Audio upload response status code: {asset_response.status_code}"
+            )
+
+            if asset_response.status_code != 200:
+                session.close()
+                return (
+                    jsonify(
+                        {
+                            "error": asset_response.json().get(
+                                "error", "Failed to upload audio"
+                            )
+                        }
+                    ),
+                    asset_response.status_code,
+                )
+
+            audio_path = asset_response.json()["fileContent"]["path"]
+            logger.debug(f"[{request_id}] Successfully uploaded audio: {audio_path}")
+        finally:
+            session.close()
+
+        # Build the payload for the SPEECH_TO_TEXT request
+        # (the upstream request always uses whisper-1, regardless of the requested model)
+        payload = {
+            "type": "SPEECH_TO_TEXT",
+            "model": "whisper-1",
+            "promptObject": {
+                "audioUrl": audio_path,
+                "response_format": response_format,
+            },
+        }
+
+        # Add optional parameters if they were provided
+        if language:
+            payload["promptObject"]["language"] = language
+
+        if temperature is not None:
+            payload["promptObject"]["temperature"] = float(temperature)
+
+        headers = {"API-KEY": api_key, "Content-Type": "application/json"}
+
+        # Send the request
+        logger.debug(
+            f"[{request_id}] Sending transcription request to {ONE_MIN_API_URL}"
+        )
+        response = api_request("POST", ONE_MIN_API_URL, json=payload, headers=headers)
+        logger.debug(
+            f"[{request_id}] Transcription response status code: {response.status_code}"
+        )
+
+        if response.status_code != 200:
+            if response.status_code == 401:
+                return ERROR_HANDLER(1020, key=api_key)
+            logger.error(
+                f"[{request_id}] Error in transcription response: {response.text[:200]}"
+            )
+            return (
+                jsonify({"error": response.json().get("error", "Unknown error")}),
+                response.status_code,
+            )
+
+        # Convert the response to the OpenAI API format
+        one_min_response = response.json()
+
+        try:
+            # Extract the text from the response
+            result_text = ""
+
+            if (
+                "aiRecord" in one_min_response
+                and "aiRecordDetail" in one_min_response["aiRecord"]
+            ):
+                result_text = one_min_response["aiRecord"]["aiRecordDetail"].get(
+                    "resultObject", [""]
+                )[0]
+            elif "resultObject" in one_min_response:
+                result_text = (
+                    one_min_response["resultObject"][0]
+                    if isinstance(one_min_response["resultObject"], list)
+                    else one_min_response["resultObject"]
+                )
+
+            # Check whether result_text is itself JSON
+            try:
+                # If result_text is a JSON string, parse it
+                if result_text and result_text.strip().startswith("{"):
+                    parsed_json = json.loads(result_text)
+                    # If the parsed JSON has a "text" field, use its value
+                    if "text" in parsed_json:
+                        result_text = parsed_json["text"]
+                        logger.debug(f"[{request_id}] Extracted inner text from JSON: {result_text}")
+            except (json.JSONDecodeError, TypeError, ValueError):
+                # If it could not be parsed as JSON, use it as is
+                logger.debug(f"[{request_id}] Using result_text as is: {result_text}")
+                pass
+
+            if not result_text:
+                logger.error(
+                    f"[{request_id}] Could not extract transcription text from API response"
+                )
+                return jsonify({"error": "Could not extract transcription text"}), 500
+
+            # The simplest and most reliable response format
+            logger.info(f"[{request_id}] Successfully processed audio transcription: {result_text}")
+
+            # Build JSON strictly in the OpenAI API format
+            response_data = {"text": result_text}
+
+            # Add CORS headers
+            response = jsonify(response_data)
+            response.headers["Access-Control-Allow-Origin"] = "*"
+            response.headers["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS"
+            response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-Type, Accept"
+
+            return response
+
+        except Exception as e:
+            logger.error(
+                f"[{request_id}] Error processing transcription response: {str(e)}"
+            )
+            return jsonify({"error": str(e)}), 500
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Exception during transcription request: {str(e)}")
+        return jsonify({"error": str(e)}), 500
+
+
+@app.route("/v1/audio/translations", methods=["POST", "OPTIONS"])
+@limiter.limit("60 per minute")
+def audio_translations():
+    """
+    Route for translating audio to text (analogous to the OpenAI Whisper API)
+    """
+    if request.method == "OPTIONS":
+        return handle_options_request()
+
+    request_id = str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Received request: /v1/audio/translations")
+
+    auth_header = request.headers.get("Authorization")
+    if not auth_header or not auth_header.startswith("Bearer "):
+        logger.error(f"[{request_id}]
Invalid Authentication") + return ERROR_HANDLER(1021) + + api_key = auth_header.split(" ")[1] + + # Checking the availability of the Audio file + if "file" not in request.files: + logger.error(f"[{request_id}] No audio file provided") + return jsonify({"error": "No audio file provided"}), 400 + + audio_file = request.files["file"] + model = request.form.get("model", "whisper-1") + response_format = request.form.get("response_format", "text") + temperature = request.form.get("temperature", 0) + + logger.info(f"[{request_id}] Processing audio translation with model {model}") + + try: + # We create a new session for loading audio + session = create_session() + headers = {"API-KEY": api_key} + + # Audio loading in 1min.ai + files = {"asset": (audio_file.filename, audio_file, "audio/mpeg")} + + try: + asset_response = session.post( + ONE_MIN_ASSET_URL, files=files, headers=headers + ) + logger.debug( + f"[{request_id}] Audio upload response status code: {asset_response.status_code}" + ) + + if asset_response.status_code != 200: + session.close() + return ( + jsonify( + { + "error": asset_response.json().get( + "error", "Failed to upload audio" + ) + } + ), + asset_response.status_code, + ) + + audio_path = asset_response.json()["fileContent"]["path"] + logger.debug(f"[{request_id}] Successfully uploaded audio: {audio_path}") + finally: + session.close() + + # We form Payload for request Audio_Translator + payload = { + "type": "AUDIO_TRANSLATOR", + "model": "whisper-1", + "promptObject": { + "audioUrl": audio_path, + "response_format": response_format, + "temperature": float(temperature), + }, + } + + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + # We send a request + logger.debug(f"[{request_id}] Sending translation request to {ONE_MIN_API_URL}") + response = api_request("POST", ONE_MIN_API_URL, json=payload, headers=headers) + logger.debug( + f"[{request_id}] Translation response status code: {response.status_code}" + ) + + if response.status_code != 200: + if response.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + logger.error( + f"[{request_id}] Error in translation response: {response.text[:200]}" + ) + return ( + jsonify({"error": response.json().get("error", "Unknown error")}), + response.status_code, + ) + + # We convert the answer to the Openai API format + one_min_response = response.json() + + try: + # We extract the text from the answer + result_text = "" + + if ( + "aiRecord" in one_min_response + and "aiRecordDetail" in one_min_response["aiRecord"] + ): + result_text = one_min_response["aiRecord"]["aiRecordDetail"].get( + "resultObject", [""] + )[0] + elif "resultObject" in one_min_response: + result_text = ( + one_min_response["resultObject"][0] + if isinstance(one_min_response["resultObject"], list) + else one_min_response["resultObject"] + ) + + if not result_text: + logger.error( + f"[{request_id}] Could not extract translation text from API response" + ) + return jsonify({"error": "Could not extract translation text"}), 500 + + # The most simple and reliable response format + logger.info(f"[{request_id}] Successfully processed audio translation: {result_text}") + + # Create json strictly in Openai API format + response_data = {"text": result_text} + + # Add Cors headlines + response = jsonify(response_data) + response.headers["Access-Control-Allow-Origin"] = "*" + response.headers["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS" + response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-Type, Accept" + + return 
response
+
+    except Exception as e:
+        logger.error(
+            f"[{request_id}] Error processing translation response: {str(e)}"
+        )
+        return jsonify({"error": str(e)}), 500
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Exception during translation request: {str(e)}")
+        return jsonify({"error": str(e)}), 500
+
+
+@app.route("/v1/audio/speech", methods=["POST", "OPTIONS"])
+@limiter.limit("60 per minute")
+def text_to_speech():
+    """
+    Route for converting text to speech (analogous to the OpenAI TTS API)
+    """
+    if request.method == "OPTIONS":
+        return handle_options_request()
+
+    request_id = request.args.get('request_id', str(uuid.uuid4())[:8])
+    logger.info(f"[{request_id}] Received request: /v1/audio/speech")
+
+    auth_header = request.headers.get("Authorization")
+    if not auth_header or not auth_header.startswith("Bearer "):
+        logger.error(f"[{request_id}] Invalid Authentication")
+        return ERROR_HANDLER(1021)
+
+    api_key = auth_header.split(" ")[1]
+
+    # Get the request data
+    request_data = {}
+
+    # Check for data in memcached in case the request was redirected
+    if 'request_id' in request.args and 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None:
+        tts_session_key = f"tts_request_{request.args.get('request_id')}"
+        try:
+            session_data = safe_memcached_operation('get', tts_session_key)
+            if session_data:
+                if isinstance(session_data, str):
+                    request_data = json.loads(session_data)
+                elif isinstance(session_data, bytes):
+                    request_data = json.loads(session_data.decode('utf-8'))
+                else:
+                    request_data = session_data
+
+                # Delete the data from the cache; it is no longer needed
+                safe_memcached_operation('delete', tts_session_key)
+                logger.debug(f"[{request_id}] Retrieved TTS request data from memcached")
+        except Exception as e:
+            logger.error(f"[{request_id}] Error retrieving TTS session data: {str(e)}")
+
+    # If no data was found in memcached, try to get it from the request body
+    if not request_data and request.is_json:
+        request_data = request.json
+
+    model = request_data.get("model", "tts-1")
+    input_text = request_data.get("input", "")
+    voice = request_data.get("voice", "alloy")
+    response_format = request_data.get("response_format", "mp3")
+    speed = request_data.get("speed", 1.0)
+
+    logger.info(f"[{request_id}] Processing TTS request with model {model}")
+    logger.debug(f"[{request_id}] Text input: {input_text[:100]}..."
if input_text and len(input_text) > 100 else f"[{request_id}] Text input: {input_text}") + + if not input_text: + logger.error(f"[{request_id}] No input text provided") + return jsonify({"error": "No input text provided"}), 400 + + try: + # We form Payload for request_to_Speech + payload = { + "type": "TEXT_TO_SPEECH", + "model": model, + "promptObject": { + "text": input_text, + "voice": voice, + "response_format": response_format, + "speed": speed + } + } + + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + # We send a request + logger.debug(f"[{request_id}] Sending TTS request to {ONE_MIN_API_URL}") + response = api_request("POST", ONE_MIN_API_URL, json=payload, headers=headers) + logger.debug(f"[{request_id}] TTS response status code: {response.status_code}") + + if response.status_code != 200: + if response.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + logger.error(f"[{request_id}] Error in TTS response: {response.text[:200]}") + return ( + jsonify({"error": response.json().get("error", "Unknown error")}), + response.status_code, + ) + + # We process the answer + one_min_response = response.json() + + try: + # We get a URL audio from the answer + audio_url = "" + + if "aiRecord" in one_min_response and "aiRecordDetail" in one_min_response["aiRecord"]: + result_object = one_min_response["aiRecord"]["aiRecordDetail"].get("resultObject", "") + if isinstance(result_object, list) and result_object: + audio_url = result_object[0] + else: + audio_url = result_object + elif "resultObject" in one_min_response: + result_object = one_min_response["resultObject"] + if isinstance(result_object, list) and result_object: + audio_url = result_object[0] + else: + audio_url = result_object + + if not audio_url: + logger.error(f"[{request_id}] Could not extract audio URL from API response") + return jsonify({"error": "Could not extract audio URL"}), 500 + + # We get audio data by URL + audio_response = api_request("GET", f"https://asset.1min.ai/{audio_url}") + + if audio_response.status_code != 200: + logger.error(f"[{request_id}] Failed to download audio: {audio_response.status_code}") + return jsonify({"error": "Failed to download audio"}), 500 + + # We return the audio to the client + logger.info(f"[{request_id}] Successfully generated speech audio") + + # We create an answer with audio and correct MIME-type + content_type = "audio/mpeg" if response_format == "mp3" else f"audio/{response_format}" + response = make_response(audio_response.content) + response.headers["Content-Type"] = content_type + set_response_headers(response) + + return response + + except Exception as e: + logger.error(f"[{request_id}] Error processing TTS response: {str(e)}") + return jsonify({"error": str(e)}), 500 + + except Exception as e: + logger.error(f"[{request_id}] Exception during TTS request: {str(e)}") + return jsonify({"error": str(e)}), 500 + + +# Functions for working with files in API +@app.route("/v1/files", methods=["GET", "POST", "OPTIONS"]) +@limiter.limit("60 per minute") +def handle_files(): + """ + Route for working with files: getting a list and downloading new files + """ + if request.method == "OPTIONS": + return handle_options_request() + + request_id = str(uuid.uuid4())[:8] + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + logger.error(f"[{request_id}] Invalid Authentication") + return ERROR_HANDLER(1021) + + api_key = auth_header.split(" ")[1] + + # Get - getting a list of files + if 
request.method == "GET":
+        logger.info(f"[{request_id}] Received request: GET /v1/files")
+        try:
+            # Get the user's file list from memcached
+            files = []
+            if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None:
+                try:
+                    user_key = f"user:{api_key}"
+                    user_files_json = safe_memcached_operation('get', user_key)
+
+                    if user_files_json:
+                        try:
+                            if isinstance(user_files_json, str):
+                                user_files = json.loads(user_files_json)
+                            elif isinstance(user_files_json, bytes):
+                                user_files = json.loads(user_files_json.decode('utf-8'))
+                            else:
+                                user_files = user_files_json
+
+                            # Convert the stored file records to the API response format
+                            for file_info in user_files:
+                                if isinstance(file_info, dict) and "id" in file_info:
+                                    files.append({
+                                        "id": file_info.get("id"),
+                                        "object": "file",
+                                        "bytes": file_info.get("bytes", 0),
+                                        "created_at": file_info.get("created_at", int(time.time())),
+                                        "filename": file_info.get("filename", f"file_{file_info.get('id')}"),
+                                        "purpose": "assistants",
+                                        "status": "processed"
+                                    })
+                            logger.debug(f"[{request_id}] Found {len(files)} files for user in memcached")
+                        except Exception as e:
+                            logger.error(f"[{request_id}] Error parsing user files from memcached: {str(e)}")
+                except Exception as e:
+                    logger.error(f"[{request_id}] Error retrieving user files from memcached: {str(e)}")
+
+            # Build the response in the OpenAI API format
+            response_data = {
+                "data": files,
+                "object": "list"
+            }
+            response = make_response(jsonify(response_data))
+            set_response_headers(response)
+            return response
+        except Exception as e:
+            logger.error(f"[{request_id}] Exception during files list request: {str(e)}")
+            return jsonify({"error": str(e)}), 500
+
+    # POST - upload a new file
+    elif request.method == "POST":
+        logger.info(f"[{request_id}] Received request: POST /v1/files")
+
+        # Check that a file was provided
+        if "file" not in request.files:
+            logger.error(f"[{request_id}] No file provided")
+            return jsonify({"error": "No file provided"}), 400
+
+        file = request.files["file"]
+        purpose = request.form.get("purpose", "assistants")
+
+        try:
+            # Read the contents of the file
+            file_data = file.read()
+            file_name = file.filename
+
+            # Upload the file and get its ID
+            file_id = upload_document(file_data, file_name, api_key, request_id)
+
+            if not file_id:
+                logger.error(f"[{request_id}] Failed to upload file")
+                return jsonify({"error": "Failed to upload file"}), 500
+
+            # Build the response in the OpenAI API format
+            response_data = {
+                "id": file_id,
+                "object": "file",
+                "bytes": len(file_data),
+                "created_at": int(time.time()),
+                "filename": file_name,
+                "purpose": purpose,
+                "status": "processed"
+            }
+
+            logger.info(f"[{request_id}] File uploaded successfully: {file_id}")
+            response = make_response(jsonify(response_data))
+            set_response_headers(response)
+            return response
+
+        except Exception as e:
+            logger.error(f"[{request_id}] Exception during file upload: {str(e)}")
+            return jsonify({"error": str(e)}), 500
+
+
+@app.route("/v1/files/<file_id>", methods=["GET", "DELETE", "OPTIONS"])
+@limiter.limit("60 per minute")
+def handle_file(file_id):
+    """
+    Route for working with a specific file: retrieving its info and deleting it
+    """
+    if request.method == "OPTIONS":
+        return handle_options_request()
+
+    request_id = str(uuid.uuid4())[:8]
+
+    auth_header = request.headers.get("Authorization")
+    if not auth_header or not auth_header.startswith("Bearer "):
+        logger.error(f"[{request_id}] Invalid Authentication")
+        return ERROR_HANDLER(1021)
+
+    api_key = auth_header.split(" ")[1]
+
+    # GET - retrieve file
information + if request.method == "GET": + logger.info(f"[{request_id}] Received request: GET /v1/files/{file_id}") + try: + # We are looking for a file in saved user files in Memcache + file_info = None + if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None: + try: + user_key = f"user:{api_key}" + user_files_json = safe_memcached_operation('get', user_key) + + if user_files_json: + try: + if isinstance(user_files_json, str): + user_files = json.loads(user_files_json) + elif isinstance(user_files_json, bytes): + user_files = json.loads(user_files_json.decode('utf-8')) + else: + user_files = user_files_json + + # Looking for a file with the specified ID + for file_item in user_files: + if file_item.get("id") == file_id: + file_info = file_item + logger.debug(f"[{request_id}] Found file info in memcached: {file_id}") + break + except Exception as e: + logger.error(f"[{request_id}] Error parsing user files from memcached: {str(e)}") + except Exception as e: + logger.error(f"[{request_id}] Error retrieving user files from memcached: {str(e)}") + + # If the file is not found, we return the filler + if not file_info: + logger.debug(f"[{request_id}] File not found in memcached, using placeholder: {file_id}") + file_info = { + "id": file_id, + "bytes": 0, + "created_at": int(time.time()), + "filename": f"file_{file_id}" + } + + # We form an answer in Openai API format + response_data = { + "id": file_info.get("id"), + "object": "file", + "bytes": file_info.get("bytes", 0), + "created_at": file_info.get("created_at", int(time.time())), + "filename": file_info.get("filename", f"file_{file_id}"), + "purpose": "assistants", + "status": "processed" + } + + response = make_response(jsonify(response_data)) + set_response_headers(response) + return response + + except Exception as e: + logger.error(f"[{request_id}] Exception during file info request: {str(e)}") + return jsonify({"error": str(e)}), 500 + + # Delete - File deletion + elif request.method == "DELETE": + logger.info(f"[{request_id}] Received request: DELETE /v1/files/{file_id}") + try: + # If the files are stored in Memcached, delete the file from the list + deleted = False + if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None: + try: + user_key = f"user:{api_key}" + user_files_json = safe_memcached_operation('get', user_key) + + if user_files_json: + try: + if isinstance(user_files_json, str): + user_files = json.loads(user_files_json) + elif isinstance(user_files_json, bytes): + user_files = json.loads(user_files_json.decode('utf-8')) + else: + user_files = user_files_json + + # We filter the list, excluding the file with the specified ID + new_user_files = [f for f in user_files if f.get("id") != file_id] + + # If the list has changed, we save the updated list + if len(new_user_files) < len(user_files): + safe_memcached_operation('set', user_key, json.dumps(new_user_files)) + logger.info(f"[{request_id}] Deleted file {file_id} from user's files in memcached") + deleted = True + except Exception as e: + logger.error(f"[{request_id}] Error updating user files in memcached: {str(e)}") + except Exception as e: + logger.error(f"[{request_id}] Error retrieving user files from memcached: {str(e)}") + + # Return the answer about successful removal (even if the file was not found) + response_data = { + "id": file_id, + "object": "file", + "deleted": True + } + + response = make_response(jsonify(response_data)) + set_response_headers(response) + return response + + except Exception as e: + logger.error(f"[{request_id}] 
Exception during file deletion: {str(e)}")
+            return jsonify({"error": str(e)}), 500
+
+
+@app.route("/v1/files/<file_id>/content", methods=["GET", "OPTIONS"])
+@limiter.limit("60 per minute")
+def handle_file_content(file_id):
+    """
+    Route for retrieving the contents of a file
+    """
+    if request.method == "OPTIONS":
+        return handle_options_request()
+
+    request_id = str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Received request: GET /v1/files/{file_id}/content")
+
+    auth_header = request.headers.get("Authorization")
+    if not auth_header or not auth_header.startswith("Bearer "):
+        logger.error(f"[{request_id}] Invalid Authentication")
+        return ERROR_HANDLER(1021)
+
+    api_key = auth_header.split(" ")[1]
+
+    try:
+        # 1min.ai has no API for retrieving file contents by ID,
+        # so return an error
+        logger.error(f"[{request_id}] File content retrieval not supported")
+        return jsonify({"error": "File content retrieval not supported"}), 501
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Exception during file content request: {str(e)}")
+        return jsonify({"error": str(e)}), 500
+
+
+# Helper function for safe access to memcached
+def safe_memcached_operation(operation, key, value=None, expiry=3600):
+    """
+    Safely performs operations on memcached, handling any exceptions.
+
+    Args:
+        operation (str): The operation to perform ('get', 'set', or 'delete')
+        key (str): The key to operate on
+        value (any, optional): The value to set (only for 'set' operation)
+        expiry (int, optional): Expiry time in seconds (only for 'set' operation)
+
+    Returns:
+        The result of the operation or None if it failed
+    """
+    if MEMCACHED_CLIENT is None:
+        # If memcached is not available, use the local in-memory storage
+        if operation == 'get':
+            return MEMORY_STORAGE.get(key, None)
+        elif operation == 'set':
+            MEMORY_STORAGE[key] = value
+            logger.info(f"Saved in MEMORY_STORAGE: key={key}")
+            return True
+        elif operation == 'delete':
+            if key in MEMORY_STORAGE:
+                del MEMORY_STORAGE[key]
+                return True
+            return False
+        return None
+
+    try:
+        if operation == 'get':
+            result = MEMCACHED_CLIENT.get(key)
+            if isinstance(result, bytes):
+                try:
+                    return json.loads(result.decode('utf-8'))
+                except Exception:
+                    return result.decode('utf-8')
+            return result
+        elif operation == 'set':
+            if isinstance(value, (dict, list)):
+                value = json.dumps(value)
+            return MEMCACHED_CLIENT.set(key, value, time=expiry)
+        elif operation == 'delete':
+            return MEMCACHED_CLIENT.delete(key)
+    except Exception as e:
+        logger.error(f"Error in memcached operation {operation} on key {key}: {str(e)}")
+        # On a memcached error, fall back to the local in-memory storage as well
+        if operation == 'get':
+            return MEMORY_STORAGE.get(key, None)
+        elif operation == 'set':
+            MEMORY_STORAGE[key] = value
+            logger.info(f"Saved in MEMORY_STORAGE due to memcached error: key={key}")
+            return True
+        elif operation == 'delete':
+            if key in MEMORY_STORAGE:
+                del MEMORY_STORAGE[key]
+                return True
+            return False
+        return None
+
+
+def delete_all_files_task():
+    """
+    Periodically deletes all user files
+    """
+    request_id = str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Starting scheduled files cleanup task")
+
+    try:
+        # Get all users with files from memcached
+        if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None:
+            # Get all the keys that begin with "user:"
+            try:
+                keys = []
+
+                # Instead of scanning slabs, use the list of known users,
+                # which is maintained when files are uploaded
+                known_users = safe_memcached_operation('get', 'known_users_list')
+                if known_users:
+                    try:
+                        if isinstance(known_users, str):
+                            user_list = json.loads(known_users)
+                        elif isinstance(known_users, bytes):
+                            user_list = json.loads(known_users.decode('utf-8'))
+                        else:
+                            user_list = known_users
+
+                        for user in user_list:
+                            user_key = f"user:{user}" if not user.startswith("user:") else user
+                            if user_key not in keys:
+                                keys.append(user_key)
+                    except Exception as e:
+                        logger.warning(f"[{request_id}] Failed to parse known users list: {str(e)}")
+
+                logger.info(f"[{request_id}] Found {len(keys)} user keys for cleanup")
+
+                # Delete the files for each user
+                for user_key in keys:
+                    try:
+                        api_key = user_key.replace("user:", "")
+                        user_files_json = safe_memcached_operation('get', user_key)
+
+                        if not user_files_json:
+                            continue
+
+                        user_files = []
+                        try:
+                            if isinstance(user_files_json, str):
+                                user_files = json.loads(user_files_json)
+                            elif isinstance(user_files_json, bytes):
+                                user_files = json.loads(user_files_json.decode('utf-8'))
+                            else:
+                                user_files = user_files_json
+                        except Exception:
+                            continue
+
+                        logger.info(f"[{request_id}] Cleaning up {len(user_files)} files for user {api_key[:8]}...")
+
+                        # Delete each file
+                        for file_info in user_files:
+                            file_id = file_info.get("id")
+                            if file_id:
+                                try:
+                                    delete_url = f"{ONE_MIN_ASSET_URL}/{file_id}"
+                                    headers = {"API-KEY": api_key}
+
+                                    delete_response = api_request("DELETE", delete_url, headers=headers)
+
+                                    if delete_response.status_code == 200:
+                                        logger.info(f"[{request_id}] Scheduled cleanup: deleted file {file_id}")
+                                    else:
+                                        logger.error(
+                                            f"[{request_id}] Scheduled cleanup: failed to delete file {file_id}: {delete_response.status_code}")
+                                except Exception as e:
+                                    logger.error(
+                                        f"[{request_id}] Scheduled cleanup: error deleting file {file_id}: {str(e)}")
+
+                        # Clear the user's file list
+                        safe_memcached_operation('set', user_key, json.dumps([]))
+                        logger.info(f"[{request_id}] Cleared files list for user {api_key[:8]}")
+                    except Exception as e:
+                        logger.error(f"[{request_id}] Error processing user {user_key}: {str(e)}")
+            except Exception as e:
+                logger.error(f"[{request_id}] Error getting keys from memcached: {str(e)}")
+    except Exception as e:
+        logger.error(f"[{request_id}] Error in scheduled cleanup task: {str(e)}")
+
+    # Schedule the next run in an hour
+    cleanup_timer = threading.Timer(3600, delete_all_files_task)
+    cleanup_timer.daemon = True
+    cleanup_timer.start()
+    logger.info(f"[{request_id}] Scheduled next cleanup in 1 hour")
+
+
+def split_text_for_streaming(text, chunk_size=6):
+    """
+    Splits the text into small chunks to emulate streaming output.
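+
+    Example (illustrative; chunk boundaries depend on sentence lengths):
+        >>> split_text_for_streaming("One. Two three. Four!", chunk_size=3)
+        ['One. Two three.', 'Four!']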
+
+    Args:
+        text (str): text to split
+        chunk_size (int): approximate size of each chunk, in words
+
+    Returns:
+        list: list of text chunks
+    """
+    if not text:
+        return [""]
+
+    # Split the text into sentences
+    sentences = re.split(r'(?<=[.!?])\s+', text)
+
+    # Group the sentences into chunks
+    chunks = []
+    current_chunk = []
+    current_word_count = 0
+
+    for sentence in sentences:
+        words_in_sentence = len(sentence.split())
+
+        # If the current chunk is empty, or adding the sentence stays within the word limit
+        if not current_chunk or current_word_count + words_in_sentence <= chunk_size:
+            current_chunk.append(sentence)
+            current_word_count += words_in_sentence
+        else:
+            # Flush the current chunk and start a new one
+            chunks.append(" ".join(current_chunk))
+            current_chunk = [sentence]
+            current_word_count = words_in_sentence
+
+    # Add the last chunk if it is not empty
+    if current_chunk:
+        chunks.append(" ".join(current_chunk))
+
+    # If there are no chunks (the split produced nothing), return the whole text at once
+    if not chunks:
+        return [text]
+
+    return chunks
+
+
+def create_image_variations(image_url, user_model, n, aspect_width=None, aspect_height=None, mode=None,
+                            request_id=None):
+    """
+    Creates variations based on the original image, taking the specifics of each model into account.
+    """
+    # Initialize the URL list before the loop
+    variation_urls = []
+    current_model = None
+
+    # Use a temporary request ID if none was provided
+    if request_id is None:
+        request_id = str(uuid.uuid4())
+
+    # Retrieve the saved generation parameters
+    generation_params = None
+    if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None:
+        try:
+            gen_key = f"gen_params:{request_id}"
+            params_json = safe_memcached_operation('get', gen_key)
+            if params_json:
+                if isinstance(params_json, str):
+                    generation_params = json.loads(params_json)
+                elif isinstance(params_json, bytes):
+                    generation_params = json.loads(params_json.decode('utf-8'))
+                logger.debug(f"[{request_id}] Retrieved generation parameters from memcached: {generation_params}")
+        except Exception as e:
+            logger.error(f"[{request_id}] Error retrieving generation parameters: {str(e)}")
+
+    # Use the saved parameters if they are available
+    if generation_params:
+        # Take aspect_width and aspect_height from the saved parameters if present
+        if "aspect_width" in generation_params and "aspect_height" in generation_params:
+            aspect_width = generation_params.get("aspect_width")
+            aspect_height = generation_params.get("aspect_height")
+            logger.debug(f"[{request_id}] Using saved aspect ratio: {aspect_width}:{aspect_height}")
+
+        # Take the mode from the saved parameters if present
+        if "mode" in generation_params:
+            mode = generation_params.get("mode")
+            logger.debug(f"[{request_id}] Using saved mode: {mode}")
+
+    # Determine the list of models to try for variations
+    variation_models = []
+    if user_model in VARIATION_SUPPORTED_MODELS:
+        variation_models.append(user_model)
+    variation_models.extend([m for m in ["midjourney_6_1", "midjourney", "clipdrop", "dall-e-2"] if m != user_model])
+    variation_models = list(dict.fromkeys(variation_models))
+
+    logger.info(f"[{request_id}] Trying image variations with models: {variation_models}")
+
+    # Create a session to download the image
+    session = create_session()
+
+    try:
+        # Download the image
+        image_response = session.get(image_url, stream=True, timeout=60)
+        if image_response.status_code != 200:
+            logger.error(f"[{request_id}] Failed to download image: {image_response.status_code}")
+            return jsonify({"error": "Failed to download image"}), 500
+
+        # NOTE: this helper assumes it is called inside a Flask request context;
+        # the upstream API key for the headers below is taken from the incoming
+        # request (the original code referenced an undefined `headers` variable).
+        auth_header = request.headers.get("Authorization", "")
+        api_key = auth_header.split(" ")[1] if auth_header.startswith("Bearer ") else ""
+        headers = {"API-KEY": api_key}
+
+        # Try each model in turn
+        for model in variation_models:
+            current_model = model
+            logger.info(f"[{request_id}] Trying model: {model} for image variations")
+
+            try:
+                # Determine the image MIME type from the response headers or the URL
+                content_type = "image/png"  # default
+                if "content-type" in image_response.headers:
+                    content_type = image_response.headers["content-type"]
+                elif image_url.lower().endswith(".webp"):
+                    content_type = "image/webp"
+                elif image_url.lower().endswith(".jpg") or image_url.lower().endswith(".jpeg"):
+                    content_type = "image/jpeg"
+                elif image_url.lower().endswith(".gif"):
+                    content_type = "image/gif"
+
+                # Determine the matching file extension
+                ext = "png"
+                if "webp" in content_type:
+                    ext = "webp"
+                elif "jpeg" in content_type or "jpg" in content_type:
+                    ext = "jpg"
+                elif "gif" in content_type:
+                    ext = "gif"
+
+                logger.debug(f"[{request_id}] Detected image type: {content_type}, extension: {ext}")
+
+                # Upload the image to the server with the correct MIME type
+                files = {"asset": (f"variation.{ext}", image_response.content, content_type)}
+                upload_response = session.post(ONE_MIN_ASSET_URL, files=files, headers=headers)
+
+                if upload_response.status_code != 200:
+                    logger.error(f"[{request_id}] Image upload failed: {upload_response.status_code}")
+                    continue
+
+                upload_data = upload_response.json()
+                logger.debug(f"[{request_id}] Asset upload response: {upload_data}")
+
+                # Get the path of the uploaded image
+                image_path = None
+                if "fileContent" in upload_data and "path" in upload_data["fileContent"]:
+                    image_path = upload_data["fileContent"]["path"]
+                    # Remove the leading slash if present
+                    if image_path.startswith('/'):
+                        image_path = image_path[1:]
+                    logger.debug(f"[{request_id}] Using relative path for variation: {image_path}")
+                else:
+                    logger.error(f"[{request_id}] Could not extract image path from upload response")
+                    continue
+
+                # Build the payload depending on the model
+                if model in ["midjourney_6_1", "midjourney"]:
+                    # For Midjourney: strip the asset host from the URL if present
+                    if image_url and isinstance(image_url, str) and 'asset.1min.ai/' in image_url:
+                        image_url = image_url.split('asset.1min.ai/', 1)[1]
+                        logger.debug(f"[{request_id}] Extracted path from image_url: {image_url}")
+
+                    payload = {
+                        "type": "IMAGE_VARIATOR",
+                        "model": model,
+                        "promptObject": {
+                            "imageUrl": image_url,
+                            "mode": mode or "fast",  # fall back to "fast" when no mode was saved
+                            "n": 4,
+                            "isNiji6": False,
+                            "aspect_width": aspect_width or 1,
+                            "aspect_height": aspect_height or 1,
+                            "maintainModeration": True
+                        }
+                    }
+                    # Detailed logging for Midjourney
+                    logger.info(f"[{request_id}] Midjourney variation payload:")
+                    logger.info(f"[{request_id}] promptObject: {json.dumps(payload['promptObject'], indent=2)}")
+                elif model == "dall-e-2":
+                    # For DALL-E 2
+                    payload = {
+                        "type": "IMAGE_VARIATOR",
+                        "model": "dall-e-2",
+                        "promptObject": {
+                            "imageUrl": image_path,
+                            "n": 1,
+                            "size": "1024x1024"
+                        }
+                    }
+                    logger.info(f"[{request_id}] DALL-E 2 variation payload: {json.dumps(payload, indent=2)}")
+
+                    # Send the request through the main API URL
+                    variation_response = api_request(
+                        "POST",
+                        ONE_MIN_API_URL,
+                        headers=headers,
+                        json=payload,
+                        timeout=300
+                    )
+
+                    if variation_response.status_code != 200:
+                        logger.error(
+                            f"[{request_id}] DALL-E 2 variation failed: {variation_response.status_code}, {variation_response.text}")
+                        
continue + + # We process the answer + variation_data = variation_response.json() + + # We extract the URL from the answer + if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]: + result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", []) + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + elif "resultObject" in variation_data: + result_object = variation_data["resultObject"] + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + + if variation_urls: + logger.info( + f"[{request_id}] Successfully created {len(variation_urls)} variations with DALL-E 2") + break + else: + logger.warning(f"[{request_id}] No variation URLs found in DALL-E 2 response") + elif model == "clipdrop": + # For Clipdrop + payload = { + "type": "IMAGE_VARIATOR", + "model": "clipdrop", + "promptObject": { + "imageUrl": image_path, + "n": n + } + } + logger.info(f"[{request_id}] Clipdrop variation payload: {json.dumps(payload, indent=2)}") + + # We send a request through the main API URL + variation_response = api_request( + "POST", + ONE_MIN_API_URL, + headers=headers, + json=payload, + timeout=300 + ) + + if variation_response.status_code != 200: + logger.error( + f"[{request_id}] Clipdrop variation failed: {variation_response.status_code}, {variation_response.text}") + continue + + # We process the answer + variation_data = variation_response.json() + + # We extract the URL from the answer + if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]: + result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", []) + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + elif "resultObject" in variation_data: + result_object = variation_data["resultObject"] + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + + if variation_urls: + logger.info( + f"[{request_id}] Successfully created {len(variation_urls)} variations with Clipdrop") + break + else: + logger.warning(f"[{request_id}] No variation URLs found in Clipdrop response") + + logger.debug(f"[{request_id}] Sending variation request to URL: {ONE_MIN_API_URL}") + logger.debug(f"[{request_id}] Using headers: {json.dumps(headers)}") + + # We send a request to create a variation + timeout = MIDJOURNEY_TIMEOUT if model.startswith("midjourney") else DEFAULT_TIMEOUT + logger.debug(f"Using extended timeout for Midjourney: {timeout}s") + + variation_response = api_request( + "POST", + ONE_MIN_API_URL, + headers=headers, + json=payload, + timeout=timeout + ) + + if variation_response.status_code != 200: + logger.error( + f"[{request_id}] Variation request with model {model} failed: {variation_response.status_code} - {variation_response.text}") + continue + + # We process the answer + variation_data = variation_response.json() + + # We extract the URL variations from the answer + if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]: + result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", []) + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + 
variation_urls.append(result_object) + elif "resultObject" in variation_data: + result_object = variation_data["resultObject"] + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + + if variation_urls: + logger.info(f"[{request_id}] Successfully created {len(variation_urls)} variations with {model}") + break + else: + logger.warning(f"[{request_id}] No variation URLs found in response for model {model}") + + except Exception as e: + logger.error(f"[{request_id}] Error with model {model}: {str(e)}") + continue + + # If you could not create variations with any model + if not variation_urls: + logger.error(f"[{request_id}] Failed to create variations with any available model") + return jsonify({"error": "Failed to create image variations with any available model"}), 500 + + # We form an answer + openai_response = { + "created": int(time.time()), + "data": [] + } + + for url in variation_urls: + openai_data = { + "url": url + } + openai_response["data"].append(openai_data) + + # We form a markdown text with a hint + text_lines = [] + for i, url in enumerate(variation_urls, 1): + text_lines.append(f"Image {i} ({url}) [_V{i}_]") + text_lines.append( + "\n> To generate **variants** of **image** - tap (copy) **[_V1_]** - **[_V4_]** and send it (paste) in the next **prompt**") + + text_response = "\n".join(text_lines) + + openai_response["choices"] = [{ + "message": { + "role": "assistant", + "content": text_response + }, + "index": 0, + "finish_reason": "stop" + }] + + logger.info(f"[{request_id}] Returning {len(variation_urls)} variation URLs to client") + + response = jsonify(openai_response) + return set_response_headers(response) + + except Exception as e: + logger.error(f"[{request_id}] Exception during image variation: {str(e)}") + logger.error(traceback.format_exc()) + return jsonify({"error": str(e)}), 500 + finally: + session.close() + + +# Run the task at the start of the server +if __name__ == "__main__": + # Launch the task of deleting files + delete_all_files_task() + + # Run the application + internal_ip = socket.gethostbyname(socket.gethostname()) + try: + response = requests.get("https://api.ipify.org") + public_ip = response.text + except: + public_ip = "not found" + + logger.info( + f"""{printedcolors.Color.fg.lightcyan} +Server is ready to serve at: +Internal IP: {internal_ip}:{PORT} +Public IP: {public_ip} (only if you've setup port forwarding on your router.) +Enter this url to OpenAI clients supporting custom endpoint: +{internal_ip}:{PORT}/v1 +If does not work, try: +{internal_ip}:{PORT}/v1/chat/completions +{printedcolors.Color.reset}""" + ) + + serve( + app, host="0.0.0.0", port=PORT, threads=6 + ) # Thread has a default of 4 if not specified. We use 6 to increase performance and allow multiple requests at once. 
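+
+
+# --- Illustrative client usage (a sketch; never executed by the relay) ---
+# The function below shows how an OpenAI-style client could call this relay.
+# The port, API key, file name, and voice are hypothetical placeholders; the
+# endpoints themselves are the ones implemented above.
+def _example_client_usage():
+    base_url = "http://localhost:5001/v1"  # assumes the default PORT
+    headers = {"Authorization": "Bearer YOUR_1MIN_API_KEY"}  # placeholder key
+
+    # Upload a file (handled by the /v1/files route above)
+    with open("document.pdf", "rb") as f:
+        upload = requests.post(f"{base_url}/files", headers=headers, files={"file": f})
+    file_id = upload.json().get("id")
+
+    # Generate speech (handled by the /v1/audio/speech route above)
+    tts = requests.post(
+        f"{base_url}/audio/speech",
+        headers=headers,
+        json={"model": "tts-1", "input": "Hello from the relay!", "voice": "alloy"},
+    )
+    with open("speech.mp3", "wb") as out:
+        out.write(tts.content)
+    return file_id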
+ + + + diff --git a/main.py b/main.py deleted file mode 100644 index ed2b9a8..0000000 --- a/main.py +++ /dev/null @@ -1,455 +0,0 @@ -from flask import Flask, request, jsonify, make_response, Response -import requests -import time -import uuid -import warnings -from waitress import serve -import json -import tiktoken -import socket -from mistral_common.tokens.tokenizers.mistral import MistralTokenizer -from mistral_common.protocol.instruct.messages import UserMessage -from mistral_common.protocol.instruct.request import ChatCompletionRequest -from pymemcache.client.base import Client -from flask_limiter import Limiter -from flask_limiter.util import get_remote_address -import os -import logging -from io import BytesIO -import coloredlogs -import printedcolors -import base64 - -# Suppress warnings from flask_limiter -warnings.filterwarnings("ignore", category=UserWarning, module="flask_limiter.extension") - -# Create a logger object -logger = logging.getLogger("1min-relay") - -# Install coloredlogs with desired log level -coloredlogs.install(level='DEBUG', logger=logger) - -def check_memcached_connection(host='memcached', port=11211): - try: - client = Client((host, port)) - client.set('test_key', 'test_value') - if client.get('test_key') == b'test_value': - client.delete('test_key') # Clean up - return True - else: - return False - except: - return False - -logger.info(''' - _ __ __ _ ___ _ - / | \/ (_)_ _ | _ \___| |__ _ _ _ - | | |\/| | | ' \| / -_) / _` | || | - |_|_| |_|_|_||_|_|_\___|_\__,_|\_, | - |__/ ''') - - -def calculate_token(sentence, model="DEFAULT"): - """Calculate the number of tokens in a sentence based on the specified model.""" - - if model.startswith("mistral"): - # Initialize the Mistral tokenizer - tokenizer = MistralTokenizer.v3(is_tekken=True) - model_name = "open-mistral-nemo" # Default to Mistral Nemo - tokenizer = MistralTokenizer.from_model(model_name) - tokenized = tokenizer.encode_chat_completion( - ChatCompletionRequest( - messages=[ - UserMessage(content=sentence), - ], - model=model_name, - ) - ) - tokens = tokenized.tokens - return len(tokens) - - elif model in ["gpt-3.5-turbo", "gpt-4"]: - # Use OpenAI's tiktoken for GPT models - encoding = tiktoken.encoding_for_model(model) - tokens = encoding.encode(sentence) - return len(tokens) - - else: - # Default to openai - encoding = tiktoken.encoding_for_model("gpt-4") - tokens = encoding.encode(sentence) - return len(tokens) -app = Flask(__name__) -if check_memcached_connection(): - limiter = Limiter( - get_remote_address, - app=app, - storage_uri="memcached://memcached:11211", # Connect to Memcached created with docker - ) -else: - # Used for ratelimiting without memcached - limiter = Limiter( - get_remote_address, - app=app, - ) - logger.warning("Memcached is not available. Using in-memory storage for rate limiting. 
Not-Recommended") - - -ONE_MIN_API_URL = "https://api.1min.ai/api/features" -ONE_MIN_CONVERSATION_API_URL = "https://api.1min.ai/api/conversations" -ONE_MIN_CONVERSATION_API_STREAMING_URL = "https://api.1min.ai/api/features?isStreaming=true" -ONE_MIN_ASSET_URL = "https://api.1min.ai/api/assets" - -# Define the models that are available for use -ALL_ONE_MIN_AVAILABLE_MODELS = [ - "deepseek-chat", - "o1-preview", - "o1-mini", - "gpt-4o-mini", - "gpt-4o", - "gpt-4-turbo", - "gpt-4", - "gpt-3.5-turbo", - "claude-instant-1.2", - "claude-2.1", - "claude-3-5-sonnet-20240620", - "claude-3-opus-20240229", - "claude-3-sonnet-20240229", - "claude-3-haiku-20240307", - "gemini-1.0-pro", - "gemini-1.5-pro", - "gemini-1.5-flash", - "mistral-large-latest", - "mistral-small-latest", - "mistral-nemo", - "open-mistral-7b", - - # Replicate - "meta/llama-2-70b-chat", - "meta/meta-llama-3-70b-instruct", - "meta/meta-llama-3.1-405b-instruct", - "command" -] - -# Define the models that support vision inputs -vision_supported_models = [ - "gpt-4o", - "gpt-4o-mini", - "gpt-4-turbo" -] - - -# Default values -SUBSET_OF_ONE_MIN_PERMITTED_MODELS = ["mistral-nemo", "gpt-4o", "deepseek-chat"] -PERMIT_MODELS_FROM_SUBSET_ONLY = False - -# Read environment variables -one_min_models_env = os.getenv("SUBSET_OF_ONE_MIN_PERMITTED_MODELS") # e.g. "mistral-nemo,gpt-4o,deepseek-chat" -permit_not_in_available_env = os.getenv("PERMIT_MODELS_FROM_SUBSET_ONLY") # e.g. "True" or "False" - -# Parse or fall back to defaults -if one_min_models_env: - SUBSET_OF_ONE_MIN_PERMITTED_MODELS = one_min_models_env.split(",") - -if permit_not_in_available_env and permit_not_in_available_env.lower() == "true": - PERMIT_MODELS_FROM_SUBSET_ONLY = True - -# Combine into a single list -AVAILABLE_MODELS = [] -AVAILABLE_MODELS.extend(SUBSET_OF_ONE_MIN_PERMITTED_MODELS) - -@app.route('/', methods=['GET', 'POST']) -def index(): - if request.method == 'POST': - return ERROR_HANDLER(1212) - if request.method == 'GET': - internal_ip = socket.gethostbyname(socket.gethostname()) - return "Congratulations! Your API is working! You can now make requests to the API.\n\nEndpoint: " + internal_ip + ':5001/v1' -@app.route('/v1/models') -@limiter.limit("500 per minute") -def models(): - # Dynamically create the list of models with additional fields - models_data = [] - if not PERMIT_MODELS_FROM_SUBSET_ONLY: - one_min_models_data = [ - { - "id": model_name, - "object": "model", - "owned_by": "1minai", - "created": 1727389042 - } - for model_name in ALL_ONE_MIN_AVAILABLE_MODELS - ] - else: - one_min_models_data = [ - {"id": model_name, "object": "model", "owned_by": "1minai", "created": 1727389042} - for model_name in SUBSET_OF_ONE_MIN_PERMITTED_MODELS - ] - models_data.extend(one_min_models_data) - return jsonify({"data": models_data, "object": "list"}) - -def ERROR_HANDLER(code, model=None, key=None): - # Handle errors in OpenAI-Structued Error - error_codes = { # Internal Error Codes - 1002: {"message": f"The model {model} does not exist.", "type": "invalid_request_error", "param": None, "code": "model_not_found", "http_code": 400}, - 1020: {"message": f"Incorrect API key provided: {key}. You can find your API key at https://app.1min.ai/api.", "type": "authentication_error", "param": None, "code": "invalid_api_key", "http_code": 401}, - 1021: {"message": "Invalid Authentication", "type": "invalid_request_error", "param": None, "code": None, "http_code": 401}, - 1212: {"message": f"Incorrect Endpoint. 
Please use the /v1/chat/completions endpoint.", "type": "invalid_request_error", "param": None, "code": "model_not_supported", "http_code": 400}, - 1044: {"message": f"This model does not support image inputs.", "type": "invalid_request_error", "param": None, "code": "model_not_supported", "http_code": 400}, - 1412: {"message": f"No message provided.", "type": "invalid_request_error", "param": "messages", "code": "invalid_request_error", "http_code": 400}, - 1423: {"message": f"No content in last message.", "type": "invalid_request_error", "param": "messages", "code": "invalid_request_error", "http_code": 400}, - } - error_data = {k: v for k, v in error_codes.get(code, {"message": "Unknown error", "type": "unknown_error", "param": None, "code": None}).items() if k != "http_code"} # Remove http_code from the error data - logger.error(f"An error has occurred while processing the user's request. Error code: {code}") - return jsonify({"error": error_data}), error_codes.get(code, {}).get("http_code", 400) # Return the error data without http_code inside the payload and get the http_code to return. - -def format_conversation_history(messages, new_input): - """ - Formats the conversation history into a structured string. - - Args: - messages (list): List of message dictionaries from the request - new_input (str): The new user input message - - Returns: - str: Formatted conversation history - """ - formatted_history = ["Conversation History:\n"] - - for message in messages: - role = message.get('role', '').capitalize() - content = message.get('content', '') - - # Handle potential list content - if isinstance(content, list): - content = '\n'.join(item['text'] for item in content if 'text' in item) - - formatted_history.append(f"{role}: {content}") - - # Append additional messages only if there are existing messages - if messages: # Save credits if it is the first message. - formatted_history.append("Respond like normal. The conversation history will be automatically updated on the next MESSAGE. DO NOT ADD User: or Assistant: to your output. 
Just respond like normal.") - formatted_history.append("User Message:\n") - formatted_history.append(new_input) - - return '\n'.join(formatted_history) - - -@app.route('/v1/chat/completions', methods=['POST', 'OPTIONS']) -@limiter.limit("500 per minute") -def conversation(): - if request.method == 'OPTIONS': - return handle_options_request() - image = False - - - auth_header = request.headers.get('Authorization') - if not auth_header or not auth_header.startswith("Bearer "): - logger.error("Invalid Authentication") - return ERROR_HANDLER(1021) - - api_key = auth_header.split(" ")[1] - - headers = { - 'API-KEY': api_key - } - - request_data = request.json - - all_messages = format_conversation_history(request_data.get('messages', []), request_data.get('new_input', '')) - - messages = request_data.get('messages', []) - if not messages: - return ERROR_HANDLER(1412) - - user_input = messages[-1].get('content') - if not user_input: - return ERROR_HANDLER(1423) - - # Check if user_input is a list and combine text if necessary - image = False - if isinstance(user_input, list): - image_paths = [] - for item in user_input: - if 'text' in item: - combined_text = '\n'.join(item['text']) - try: - if 'image_url' in item: - if request_data.get('model', 'mistral-nemo') not in vision_supported_models: - return ERROR_HANDLER(1044, request_data.get('model', 'mistral-nemo')) - if item['image_url']['url'].startswith("data:image/png;base64,"): - base64_image = item['image_url']['url'].split(",")[1] - binary_data = base64.b64decode(base64_image) - else: - binary_data = requests.get(item['image_url']['url']) - binary_data.raise_for_status() # Raise an error for bad responses - binary_data = BytesIO(binary_data.content) - files = { - 'asset': ("relay" + str(uuid.uuid4()), binary_data, 'image/png') - } - asset = requests.post(ONE_MIN_ASSET_URL, files=files, headers=headers) - asset.raise_for_status() # Raise an error for bad responses - image_path = asset.json()['fileContent']['path'] - image_paths.append(image_path) - image = True - except Exception as e: - print(f"An error occurred e:" + str(e)[:60]) - # Optionally log the error or return an appropriate response - - user_input = str(combined_text) - - prompt_token = calculate_token(str(all_messages)) - if PERMIT_MODELS_FROM_SUBSET_ONLY and request_data.get('model', 'mistral-nemo') not in AVAILABLE_MODELS: - return ERROR_HANDLER(1002, request_data.get('model', 'mistral-nemo')) # Handle invalid model - - logger.debug(f"Proccessing {prompt_token} prompt tokens with model {request_data.get('model', 'mistral-nemo')}") - - if not image: - payload = { - "type": "CHAT_WITH_AI", - "model": request_data.get('model', 'mistral-nemo'), - "promptObject": { - "prompt": all_messages, - "isMixed": False, - "webSearch": False - } - } - else: - payload = { - "type": "CHAT_WITH_IMAGE", - "model": request_data.get('model', 'mistral-nemo'), - "promptObject": { - "prompt": all_messages, - "isMixed": False, - "imageList": image_paths - } - } - - headers = {"API-KEY": api_key, 'Content-Type': 'application/json'} - - if not request_data.get('stream', False): - # Non-Streaming Response - logger.debug("Non-Streaming AI Response") - response = requests.post(ONE_MIN_API_URL, json=payload, headers=headers) - response.raise_for_status() - one_min_response = response.json() - - transformed_response = transform_response(one_min_response, request_data, prompt_token) - response = make_response(jsonify(transformed_response)) - set_response_headers(response) - - return response, 200 - - else: - # 
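# Editor's example (hypothetical messages, not from the original source): given
#   messages = [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]
# format_conversation_history(messages, "How are you?") returns roughly:
#
#   Conversation History:
#
#   User: Hi
#   Assistant: Hello!
#   Respond like normal. The conversation history will be automatically updated on the next MESSAGE. ...
#   User Message:
#
#   How are you?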
Streaming Response - logger.debug("Streaming AI Response") - response_stream = requests.post(ONE_MIN_CONVERSATION_API_STREAMING_URL, data=json.dumps(payload), headers=headers, stream=True) - if response_stream.status_code != 200: - if response_stream.status_code == 401: - return ERROR_HANDLER(1020) - logger.error(f"An unknown error occurred while processing the user's request. Error code: {response_stream.status_code}") - return ERROR_HANDLER(response_stream.status_code) - return Response(stream_response(response_stream, request_data, request_data.get('model', 'mistral-nemo'), int(prompt_token)), content_type='text/event-stream') -def handle_options_request(): - response = make_response() - response.headers.add('Access-Control-Allow-Origin', '*') - response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization') - response.headers.add('Access-Control-Allow-Methods', 'POST, OPTIONS') - return response, 204 - -def transform_response(one_min_response, request_data, prompt_token): - completion_token = calculate_token(one_min_response['aiRecord']["aiRecordDetail"]["resultObject"][0]) - logger.debug(f"Finished processing Non-Streaming response. Completion tokens: {str(completion_token)}") - logger.debug(f"Total tokens: {str(completion_token + prompt_token)}") - return { - "id": f"chatcmpl-{uuid.uuid4()}", - "object": "chat.completion", - "created": int(time.time()), - "model": request_data.get('model', 'mistral-nemo'), - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": one_min_response['aiRecord']["aiRecordDetail"]["resultObject"][0], - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": prompt_token, - "completion_tokens": completion_token, - "total_tokens": prompt_token + completion_token - } - } - -def set_response_headers(response): - response.headers['Content-Type'] = 'application/json' - response.headers['Access-Control-Allow-Origin'] = '*' - response.headers['X-Request-ID'] = str(uuid.uuid4()) - -def stream_response(response, request_data, model, prompt_tokens): - all_chunks = "" - for chunk in response.iter_content(chunk_size=1024): - finish_reason = None - - return_chunk = { - "id": f"chatcmpl-{uuid.uuid4()}", - "object": "chat.completion.chunk", - "created": int(time.time()), - "model": request_data.get('model', 'mistral-nemo'), - "choices": [ - { - "index": 0, - "delta": { - "content": chunk.decode('utf-8') - }, - "finish_reason": finish_reason - } - ] - } - all_chunks += chunk.decode('utf-8') - yield f"data: {json.dumps(return_chunk)}\n\n" - - tokens = calculate_token(all_chunks) - logger.debug(f"Finished processing streaming response. Completion tokens: {str(tokens)}") - logger.debug(f"Total tokens: {str(tokens + prompt_tokens)}") - - # Final chunk when iteration stops - final_chunk = { - "id": f"chatcmpl-{uuid.uuid4()}", - "object": "chat.completion.chunk", - "created": int(time.time()), - "model": request_data.get('model', 'mistral-nemo'), - "choices": [ - { - "index": 0, - "delta": { - "content": "" - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": prompt_tokens, - "completion_tokens": tokens, - "total_tokens": tokens + prompt_tokens - } - } - yield f"data: {json.dumps(final_chunk)}\n\n" - yield "data: [DONE]\n\n"
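# Editor's sketch of the wire format emitted by stream_response above (values illustrative):
#
#   data: {"id": "chatcmpl-<uuid>", "object": "chat.completion.chunk", "created": 1727389042,
#          "model": "mistral-nemo",
#          "choices": [{"index": 0, "delta": {"content": "Hel"}, "finish_reason": null}]}
#
# followed by a final chunk whose delta is empty, whose finish_reason is "stop" and which
# carries the usage block, and then the terminator line:
#
#   data: [DONE]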
 -if __name__ == '__main__': - internal_ip = socket.gethostbyname(socket.gethostname()) - response = requests.get('https://api.ipify.org') - public_ip = response.text - logger.info(f"""{printedcolors.Color.fg.lightcyan} -Server is ready to serve at: -Internal IP: {internal_ip}:5001 -Public IP: {public_ip} (only if you've setup port forwarding on your router.) -Enter this url to OpenAI clients supporting custom endpoint: -{internal_ip}:5001/v1 -If does not work, try: -{internal_ip}:5001/v1/chat/completions -{printedcolors.Color.reset}""") - serve(app, host='0.0.0.0', port=5001, threads=6) # Thread has a default of 4 if not specified. We use 6 to increase performance and allow multiple requests at once. diff --git a/requirements.txt b/requirements.txt index 853681d..c9f32d8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,14 @@ flask==3.1.0 +flask_cors==4.0.0 requests==2.32.3 waitress==3.0.2 -mistral_common==1.5.3 -flask-limiter==3.10.1 -limits[memcached]==4.0.1 +mistral_common==1.5.4 +flask-limiter==3.11.0 +limits[memcached]==4.4.1 coloredlogs==15.0.1 printedcolors==1.0.0 -pymemcache==4.0.0 \ No newline at end of file +pymemcache==4.0.0 +tiktoken>=0.7.0 +python-memcached==1.62 +python-dotenv==1.0.1 +werkzeug>=3.1.0 diff --git a/routes/__init__.py b/routes/__init__.py new file mode 100644 index 0000000..687a8f9 --- /dev/null +++ b/routes/__init__.py @@ -0,0 +1,59 @@ +# routes/__init__.py +# Initialization of the routes package + +# Import the required modules +from utils.logger import logger +from utils.imports import * +from utils.constants import * +from utils.common import ( + ERROR_HANDLER, + handle_options_request, + set_response_headers, + create_session, + api_request, + safe_temp_file, + calculate_token +) + +# Make app and limiter available when the routes package is imported +import sys +mod = sys.modules[__name__] + +# Import app and limiter from the root module +try: + import app as root_app + # Transfer the objects into the current module + mod.app = root_app.app + mod.limiter = root_app.limiter + mod.IMAGE_CACHE = root_app.IMAGE_CACHE + mod.MEMORY_STORAGE = root_app.MEMORY_STORAGE + mod.MAX_CACHE_SIZE = 100 # Maximum size of the image cache + logger.info("Global objects successfully passed to the routes module") + + # Import the functions module instead of importing directly from shared_func + from .functions import ( + validate_auth, + handle_api_error, + format_openai_response, + format_image_response, + stream_response, + get_full_url, + extract_data_from_api_response, + extract_text_from_response, + extract_image_urls, + extract_audio_url + ) + + # Import the route modules (blueprints are registered on import) + from . import text, images, audio, files + + logger.info("All route modules imported") + + # Make the routes accessible from the root module + root_app.routes = mod + logger.info("Routes module attached to the root app module") + +except ImportError as e: + logger.error(f"Failed to import app.py: {str(e)}. Routes may not work correctly.") + +logger.info("Route initialization complete") diff --git a/routes/audio.py b/routes/audio.py new file mode 100644 index 0000000..51c08af --- /dev/null +++ b/routes/audio.py @@ -0,0 +1,339 @@ +# version 1.0.1 #increment every time you make changes +# routes/audio.py + +# Import only the modules we need +from utils.imports import * +from utils.logger import logger +from utils.constants import * +from utils.common import ( + ERROR_HANDLER, + handle_options_request, + set_response_headers, + create_session, + api_request, + safe_temp_file, + calculate_token +) +from utils.memcached import safe_memcached_operation +from routes.functions.shared_func import validate_auth, handle_api_error, extract_text_from_response, extract_audio_url +from routes.functions.audio_func import upload_audio_file, try_models_in_sequence, prepare_models_list, prepare_whisper_payload, prepare_tts_payload +from . import app, limiter, MEMORY_STORAGE + +@app.route("/v1/audio/transcriptions", methods=["POST", "OPTIONS"]) +@limiter.limit("60 per minute") +def audio_transcriptions(): + """ + Route for converting speech into text (analogue of the OpenAI Whisper API) + """ + if request.method == "OPTIONS": + return handle_options_request() + + request_id = str(uuid.uuid4())[:8] + logger.info(f"[{request_id}] Received request: /v1/audio/transcriptions") + + # Check authorization + api_key, error = validate_auth(request, request_id) + if error: + return error + + # Check that an audio file is present + if "file" not in request.files: + logger.error(f"[{request_id}] No audio file provided") + return jsonify({"error": "No audio file provided"}), 400 + + audio_file = request.files["file"] + model = request.form.get("model", "whisper-1") + response_format = request.form.get("response_format", "text") + language = request.form.get("language", None) + temperature = request.form.get("temperature", 0) + + logger.info(f"[{request_id}] Processing audio transcription with model {model}") + + try: + # Upload the audio file + audio_path, error = upload_audio_file(audio_file, api_key, request_id) + if error: + return error + + # Prepare the list of models to try + models_to_try = prepare_models_list(model, SPEECH_TO_TEXT_MODELS) + logger.debug(f"[{request_id}] Will try these models in order: {models_to_try}") + + # Helper that builds the payload + def create_transcription_payload(current_model): + payload = { + "type": "SPEECH_TO_TEXT", + "model": current_model, + "promptObject": { + "audioUrl": audio_path, + "response_format": response_format, + }, + } + + # Add optional parameters when provided + if language: + payload["promptObject"]["language"] = language + + if temperature is not None: + payload["promptObject"]["temperature"] = float(temperature) + + return payload +
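# Editor's example (illustrative values, not from the original source): with language="en"
# and the default temperature of 0, create_transcription_payload("whisper-1") yields:
#
#   {"type": "SPEECH_TO_TEXT", "model": "whisper-1",
#    "promptObject": {"audioUrl": "<uploaded asset path>", "response_format": "text",
#                     "language": "en", "temperature": 0.0}}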
error_text = "No available providers at the moment" + try: + error_json = error.json() + if "error" in error_json: + error_text = error_json["error"] + except: + pass + + return jsonify({"error": f"All available models failed. {error_text}"}), error.status_code + else: + logger.error(f"[{request_id}] Error: {str(error)}") + return jsonify({"error": f"All available models failed. {str(error)}"}), 500 + + # Извлекаем текст из ответа + result_text = extract_text_from_response(one_min_response, request_id) + + if not result_text: + logger.error(f"[{request_id}] Could not extract transcription text from API response") + return jsonify({"error": "Could not extract transcription text"}), 500 + + logger.info(f"[{request_id}] Successfully processed audio transcription") + + # Создаем json строго в формате Openai API + response_data = {"text": result_text} + response = jsonify(response_data) + set_response_headers(response) + return response + + except Exception as e: + logger.error(f"[{request_id}] Exception during transcription request: {str(e)}") + return jsonify({"error": str(e)}), 500 + + +@app.route("/v1/audio/translations", methods=["POST", "OPTIONS"]) +@limiter.limit("60 per minute") +def audio_translations(): + """ + Route for translating audio to text (analogue Openai Whisper API) + """ + if request.method == "OPTIONS": + return handle_options_request() + + request_id = str(uuid.uuid4())[:8] + logger.info(f"[{request_id}] Received request: /v1/audio/translations") + + # Проверяем авторизацию + api_key, error = validate_auth(request, request_id) + if error: + return error + + # Проверяем наличие аудио файла + if "file" not in request.files: + logger.error(f"[{request_id}] No audio file provided") + return jsonify({"error": "No audio file provided"}), 400 + + audio_file = request.files["file"] + model = request.form.get("model", "whisper-1") + response_format = request.form.get("response_format", "text") + temperature = request.form.get("temperature", 0) + + logger.info(f"[{request_id}] Processing audio translation with model {model}") + + try: + # Загружаем аудио файл + audio_path, error = upload_audio_file(audio_file, api_key, request_id) + if error: + return error + + # Подготовка списка моделей для перебора + models_to_try = prepare_models_list(model, SPEECH_TO_TEXT_MODELS) + logger.debug(f"[{request_id}] Will try these models in order: {models_to_try}") + + # Функция для создания payload + def create_translation_payload(current_model): + return { + "type": "AUDIO_TRANSLATOR", + "model": current_model, + "promptObject": { + "audioUrl": audio_path, + "response_format": response_format, + "temperature": float(temperature), + }, + } + + # Пробуем модели по очереди + one_min_response, error = try_models_in_sequence( + models_to_try, create_translation_payload, api_key, request_id + ) + + if error: + if isinstance(error, requests.Response): + if error.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + + logger.error(f"[{request_id}] API error: {error.text[:200] if hasattr(error, 'text') else str(error)}") + error_text = "No available providers at the moment" + try: + error_json = error.json() + if "error" in error_json: + error_text = error_json["error"] + except: + pass + + return jsonify({"error": f"All available models failed. {error_text}"}), error.status_code + else: + logger.error(f"[{request_id}] Error: {str(error)}") + return jsonify({"error": f"All available models failed. 
{str(error)}"}), 500 + + # Извлекаем текст из ответа + result_text = extract_text_from_response(one_min_response, request_id) + + if not result_text: + logger.error(f"[{request_id}] Could not extract translation text from API response") + return jsonify({"error": "Could not extract translation text"}), 500 + + logger.info(f"[{request_id}] Successfully processed audio translation") + + # Создаем json строго в формате Openai API + response_data = {"text": result_text} + response = jsonify(response_data) + set_response_headers(response) + return response + + except Exception as e: + logger.error(f"[{request_id}] Exception during translation request: {str(e)}") + return jsonify({"error": str(e)}), 500 + + +@app.route("/v1/audio/speech", methods=["POST", "OPTIONS"]) +@limiter.limit("60 per minute") +def text_to_speech(): + """ + Route for converting text into speech (analogue Openai TTS API) + """ + if request.method == "OPTIONS": + return handle_options_request() + + request_id = request.args.get('request_id', str(uuid.uuid4())[:8]) + logger.info(f"[{request_id}] Received request: /v1/audio/speech") + + # Проверяем авторизацию + api_key, error = validate_auth(request, request_id) + if error: + return error + + # Получаем данные запроса + request_data = {} + + # Проверяем наличие данных в Memcached если запрос был перенаправлен + if 'request_id' in request.args and 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None: + tts_session_key = f"tts_request_{request.args.get('request_id')}" + try: + session_data = safe_memcached_operation('get', tts_session_key) + if session_data: + if isinstance(session_data, str): + request_data = json.loads(session_data) + elif isinstance(session_data, bytes): + request_data = json.loads(session_data.decode('utf-8')) + else: + request_data = session_data + + # Удаляем данные из кэша, они больше не нужны + safe_memcached_operation('delete', tts_session_key) + logger.debug(f"[{request_id}] Retrieved TTS request data from memcached") + except Exception as e: + logger.error(f"[{request_id}] Error retrieving TTS session data: {str(e)}") + + # Если данные не найдены в Memcache, пробуем получить их из тела запроса + if not request_data and request.is_json: + request_data = request.json + + model = request_data.get("model", "tts-1") + input_text = request_data.get("input", "") + voice = request_data.get("voice", "alloy") + response_format = request_data.get("response_format", "mp3") + speed = request_data.get("speed", 1.0) + + logger.info(f"[{request_id}] Processing TTS request with model {model}") + logger.debug(f"[{request_id}] Text input: {input_text[:100]}..." 
if input_text and len(input_text) > 100 else f"[{request_id}] Text input: {input_text}") + + if not input_text: + logger.error(f"[{request_id}] No input text provided") + return jsonify({"error": "No input text provided"}), 400 + + try: + # Используем функцию prepare_tts_payload для формирования запроса + payload = prepare_tts_payload(model, input_text, voice, speed, response_format) + + # Логируем полный payload для отладки + logger.debug(f"[{request_id}] TTS payload: {json.dumps(payload, ensure_ascii=False)}") + + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + # Отправляем запрос + logger.debug(f"[{request_id}] Sending TTS request to {ONE_MIN_API_URL}") + response = api_request("POST", ONE_MIN_API_URL, json=payload, headers=headers) + logger.debug(f"[{request_id}] TTS response status code: {response.status_code}") + + if response.status_code != 200: + # Логируем полный ответ для отладки + error_text = "Unknown error" + try: + error_data = response.json() + error_text = json.dumps(error_data, ensure_ascii=False) + logger.error(f"[{request_id}] Detailed error response: {error_text}") + except: + if hasattr(response, 'text'): + error_text = response.text + + return handle_api_error(response, api_key, request_id) + + # Обрабатываем ответ + one_min_response = response.json() + + try: + # Получаем URL аудио из ответа + audio_url = extract_audio_url(one_min_response, request_id) + + if not audio_url: + logger.error(f"[{request_id}] Could not extract audio URL from API response") + return jsonify({"error": "Could not extract audio URL"}), 500 + + # Получаем аудио данные по URL + audio_response = api_request("GET", f"https://asset.1min.ai/{audio_url}") + + if audio_response.status_code != 200: + logger.error(f"[{request_id}] Failed to download audio: {audio_response.status_code}") + return jsonify({"error": "Failed to download audio"}), 500 + + # Возвращаем аудио клиенту + logger.info(f"[{request_id}] Successfully generated speech audio") + + # Создаем ответ с аудио и правильным MIME-type + content_type = "audio/mpeg" if response_format == "mp3" else f"audio/{response_format}" + response = make_response(audio_response.content) + response.headers["Content-Type"] = content_type + set_response_headers(response) + + return response + + except Exception as e: + logger.error(f"[{request_id}] Error processing TTS response: {str(e)}") + return jsonify({"error": str(e)}), 500 + + except Exception as e: + logger.error(f"[{request_id}] Exception during TTS request: {str(e)}") + return jsonify({"error": str(e)}), 500 diff --git a/routes/files.py b/routes/files.py new file mode 100644 index 0000000..2e82245 --- /dev/null +++ b/routes/files.py @@ -0,0 +1,228 @@ +# routes/files.py + +# Импортируем только необходимые модули +from utils.imports import * +from utils.logger import logger +from utils.constants import * +from utils.common import ERROR_HANDLER, handle_options_request, set_response_headers, api_request +from utils.memcached import safe_memcached_operation +from . 
+from .functions import ( + validate_auth, + handle_api_error, + get_user_files, + save_user_files, + upload_asset, + get_mime_type, + format_file_response, + create_api_response, + find_file_by_id +) + +# File-handling routes +@app.route("/v1/files", methods=["GET", "POST", "OPTIONS"]) +@limiter.limit("60 per minute") +def handle_files(): + """ + Route for working with files: listing existing files and uploading new ones + """ + if request.method == "OPTIONS": + return handle_options_request() + + request_id = str(uuid.uuid4())[:8] + + # Check authorization + api_key, error = validate_auth(request, request_id) + if error: + return error + + # GET - list the files + if request.method == "GET": + logger.info(f"[{request_id}] Received request: GET /v1/files") + try: + # Get the user's file list + user_files = get_user_files(api_key, request_id) + + # Build the response in the OpenAI API format + files_data = [] + for file_info in user_files: + if isinstance(file_info, dict) and "id" in file_info: + files_data.append(format_file_response(file_info)) + + response_data = { + "data": files_data, + "object": "list" + } + + return create_api_response(response_data) + + except Exception as e: + logger.error(f"[{request_id}] Exception during files list request: {str(e)}") + return jsonify({"error": str(e)}), 500 + + # POST - upload a new file + elif request.method == "POST": + logger.info(f"[{request_id}] Received request: POST /v1/files") + + # Check that a file is present + if "file" not in request.files: + logger.error(f"[{request_id}] No file provided") + return jsonify({"error": "No file provided"}), 400 + + file = request.files["file"] + purpose = request.form.get("purpose", "assistants") + + try: + # Determine the file's MIME type + mime_type, file_type = get_mime_type(file.filename) + + # Read the file content once so we can both measure its size and pass the data on + file_content = file.read() + file_size = len(file_content) + + # Upload the file + file_id, file_path, error = upload_asset( + file_content, + file.filename, + mime_type, + api_key, + request_id, + file_type + ) + + if error: + return error + + # Get the user's current file list + user_files = get_user_files(api_key, request_id) + + # Add the new file to the list + file_info = { + "id": file_id, + "filename": file.filename, + "bytes": file_size, + "created_at": int(time.time()) + } + user_files.append(file_info) + + # Save the updated list + save_user_files(api_key, user_files, request_id) + + # Build the response + response_data = format_file_response(file_info, purpose=purpose) + + logger.info(f"[{request_id}] File uploaded successfully: {file_id}") + return create_api_response(response_data) + + except Exception as e: + logger.error(f"[{request_id}] Exception during file upload: {str(e)}") + return jsonify({"error": str(e)}), 500 + +@app.route("/v1/files/<file_id>", methods=["GET", "DELETE", "OPTIONS"]) +@limiter.limit("60 per minute") +def handle_file(file_id): + """ + Route for working with a specific file: retrieving its information and deleting it + """ + if request.method == "OPTIONS": + return handle_options_request() + + request_id = str(uuid.uuid4())[:8] + + # Check authorization + api_key, error = validate_auth(request, request_id) + if error: + return error + + # GET - fetch file information + if request.method == "GET": + logger.info(f"[{request_id}] Received request: GET /v1/files/{file_id}") + try: + # Look for the file in the user's file list + user_files = get_user_files(api_key, request_id) + file_info = find_file_by_id(user_files, file_id) + + if not file_info: + logger.error(f"[{request_id}] File {file_id} not found") + return jsonify({"error": f"File {file_id} not found"}), 404 + + # Build the response + response_data = format_file_response(file_info, file_id) + return create_api_response(response_data) + + except Exception as e: + logger.error(f"[{request_id}] Exception during file info request: {str(e)}") + return jsonify({"error": str(e)}), 500 + + # DELETE - delete the file + elif request.method == "DELETE": + logger.info(f"[{request_id}] Received request: DELETE /v1/files/{file_id}") + try: + # Get the user's file list + user_files = get_user_files(api_key, request_id) + + # Check whether the file exists + found = False + for file in user_files: + if file.get("id") == file_id: + found = True + break + + if not found: + logger.error(f"[{request_id}] File {file_id} not found") + return jsonify({"error": f"File {file_id} not found"}), 404 + + # Filter the list, excluding the file with the given ID + new_user_files = [f for f in user_files if f.get("id") != file_id] + + # Save the updated list + save_user_files(api_key, new_user_files, request_id) + logger.info(f"[{request_id}] Deleted file {file_id} from user's files") + + # Return a successful-deletion response + response_data = { + "id": file_id, + "object": "file", + "deleted": True + } + + return create_api_response(response_data) + + except Exception as e: + logger.error(f"[{request_id}] Exception during file deletion: {str(e)}") + return jsonify({"error": str(e)}), 500 + +@app.route("/v1/files/<file_id>/content", methods=["GET", "OPTIONS"]) +@limiter.limit("60 per minute") +def handle_file_content(file_id): + """ + Route for obtaining the content of a file + """ + if request.method == "OPTIONS": + return handle_options_request() + + request_id = str(uuid.uuid4())[:8] + logger.info(f"[{request_id}] Received request: GET /v1/files/{file_id}/content") + + # Check authorization + api_key, error = validate_auth(request, request_id) + if error: + return error + + try: + # Check whether the file exists + user_files = get_user_files(api_key, request_id) + file_info = find_file_by_id(user_files, file_id) + + if not file_info: + logger.error(f"[{request_id}] File {file_id} not found") + return jsonify({"error": f"File {file_id} not found"}), 404 + + # 1min.ai has no API for fetching a file's content by ID + logger.error(f"[{request_id}] File content retrieval not supported") + return jsonify({"error": "File content retrieval not supported"}), 501 + + except Exception as e: + logger.error(f"[{request_id}] Exception during file content request: {str(e)}") + return jsonify({"error": str(e)}), 500 + diff --git a/routes/functions.py b/routes/functions.py new file mode 100644 index 0000000..c4f72b0 --- /dev/null +++ b/routes/functions.py @@ -0,0 +1,71 @@ +# routes/functions.py +# Общие утилиты для маршрутов + +# Реэкспортируем основные зависимости +from utils.imports import * +from utils.logger import logger +from utils.constants import * +from utils.common import ( + ERROR_HANDLER, + handle_options_request, + set_response_headers, + create_session, + api_request, + safe_temp_file, + calculate_token +) +from utils.memcached import safe_memcached_operation + +# Экспортируем общие функции +from .functions.shared_func import ( + validate_auth, + handle_api_error, + format_openai_response, + format_image_response, + stream_response, + get_full_url, + extract_data_from_api_response, 
extract_text_from_response, + extract_image_urls, + extract_audio_url +) + +# Экспортируем функции для текстовых моделей +from .functions.txt_func import ( + format_conversation_history, + get_model_capabilities, + prepare_payload, + transform_response, + emulate_stream_response, + streaming_request +) + +# Экспортируем функции для изображений +from .functions.img_func import ( + build_generation_payload, + parse_aspect_ratio, + create_image_variations, + retry_image_upload +) + +# Экспортируем функции для аудио +from .functions.audio_func import ( + upload_audio_file, + try_models_in_sequence, + prepare_models_list, + prepare_whisper_payload, + prepare_tts_payload +) + +# Экспортируем функции для файлов +from .functions.file_func import ( + get_user_files, + save_user_files, + upload_asset, + get_mime_type, + format_file_response, + create_api_response, + find_file_by_id, + find_conversation_id, + create_conversation_with_files +) diff --git a/routes/functions/__init__.py b/routes/functions/__init__.py new file mode 100644 index 0000000..39e80b8 --- /dev/null +++ b/routes/functions/__init__.py @@ -0,0 +1,56 @@ +# routes/functions/__init__.py +# Инициализация субпакета функций + +# Экспортируем общие функции +from .shared_func import ( + validate_auth, + handle_api_error, + format_openai_response, + format_image_response, + stream_response, + get_full_url, + extract_data_from_api_response, + extract_text_from_response, + extract_image_urls, + extract_audio_url +) + +# Экспортируем функции для текстовых моделей +from .txt_func import ( + format_conversation_history, + get_model_capabilities, + prepare_payload, + transform_response, + emulate_stream_response, + streaming_request +) + +# Экспортируем функции для изображений +from .img_func import ( + build_generation_payload, + parse_aspect_ratio, + create_image_variations, + retry_image_upload +) + +# Экспортируем функции для аудио +from .audio_func import ( + upload_audio_file, + try_models_in_sequence, + prepare_models_list, + prepare_whisper_payload, + prepare_tts_payload +) + +# Экспортируем функции для файлов +from .file_func import ( + get_user_files, + save_user_files, + upload_asset, + get_mime_type, + format_file_response, + create_api_response, + find_file_by_id, + find_conversation_id, + create_conversation_with_files +) diff --git a/routes/functions/audio_func.py b/routes/functions/audio_func.py new file mode 100644 index 0000000..bcfd5c3 --- /dev/null +++ b/routes/functions/audio_func.py @@ -0,0 +1,203 @@ +# version 1.0.1 #increment every time you make changes +# routes/functions/audio_func.py + +from utils.imports import * +from utils.logger import logger +from utils.constants import * +from utils.common import ( + ERROR_HANDLER, + handle_options_request, + set_response_headers, + create_session, + api_request, + safe_temp_file, + calculate_token +) +from utils.memcached import safe_memcached_operation +from routes.functions.shared_func import extract_text_from_response, extract_audio_url + +#=======================================================# +# ----------- Функции для работы с аудио --------------- +#=======================================================# + +def upload_audio_file(audio_file, api_key, request_id): + """ + Загружает аудио файл в 1min.ai + + Args: + audio_file: Файл аудио + api_key: API ключ + request_id: ID запроса для логирования + + Returns: + tuple: (audio_path, error_response) + audio_path будет None если произошла ошибка + """ + try: + session = create_session() + headers = {"API-KEY": api_key} + 
files = {"asset": (audio_file.filename, audio_file, "audio/mpeg")} + + try: + asset_response = session.post(ONE_MIN_ASSET_URL, files=files, headers=headers) + logger.debug(f"[{request_id}] Audio upload response status code: {asset_response.status_code}") + + if asset_response.status_code != 200: + error_message = asset_response.json().get("error", "Failed to upload audio") + return None, (jsonify({"error": error_message}), asset_response.status_code) + + audio_path = asset_response.json()["fileContent"]["path"] + logger.debug(f"[{request_id}] Successfully uploaded audio: {audio_path}") + return audio_path, None + finally: + session.close() + except Exception as e: + logger.error(f"[{request_id}] Error uploading audio: {str(e)}") + return None, (jsonify({"error": f"Failed to upload audio: {str(e)}"}), 500) + +def try_models_in_sequence(models_to_try, payload_func, api_key, request_id): + """ + Пробует использовать модели по очереди, пока одна не сработает + + Args: + models_to_try: Список моделей для перебора + payload_func: Функция создания payload для каждой модели + api_key: API ключ + request_id: ID запроса для логирования + + Returns: + tuple: (result, error) + result будет None если все модели завершились с ошибкой + """ + last_error = None + + for current_model in models_to_try: + try: + payload = payload_func(current_model) + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + logger.debug(f"[{request_id}] Trying model {current_model}") + response = api_request("POST", ONE_MIN_API_URL, json=payload, headers=headers) + logger.debug(f"[{request_id}] Response status code: {response.status_code} for model {current_model}") + + if response.status_code == 200: + one_min_response = response.json() + return one_min_response, None + else: + # Сохраняем ошибку и пробуем следующую модель + last_error = response + logger.warning(f"[{request_id}] Model {current_model} failed with status {response.status_code}") + + except Exception as e: + logger.error(f"[{request_id}] Error with model {current_model}: {str(e)}") + last_error = e + + # Если мы дошли до сюда, значит ни одна модель не сработала + logger.error(f"[{request_id}] All available models failed") + + # Возвращаем последнюю ошибку + return None, last_error + +def prepare_models_list(requested_model, available_models): + """ + Подготавливает список моделей для обработки + + Args: + requested_model: Запрошенная модель + available_models: Доступные модели + + Returns: + list: Список моделей для обработки + """ + # Проверяем наличие модели в списке доступных + if requested_model in available_models: + # Если модель есть в списке, пробуем её первой + models = [requested_model] + [m for m in available_models if m != requested_model] + else: + # Если модели нет, используем все доступные + models = available_models + + return models + +def prepare_whisper_payload(model, file_path, language=None, prompt=None, temperature=None, response_format=None): + """ + Подготавливает данные для запроса к API транскрипции аудио + + Args: + model: Модель для транскрипции + file_path: Путь к аудиофайлу + language: Язык аудио (опционально) + prompt: Подсказка для транскрипции (опционально) + temperature: Температура генерации (опционально) + response_format: Формат ответа (опционально) + + Returns: + dict: Данные для запроса + """ + # Формируем базовый запрос согласно документации + payload = { + "type": "SPEECH_TO_TEXT", + "model": model, + "promptObject": { + "audioUrl": file_path, + "response_format": response_format or "text" + } + } + + # 
Добавляем дополнительные параметры, если они указаны + if language: + payload["promptObject"]["language"] = language + + if prompt: + payload["promptObject"]["prompt"] = prompt + + if temperature is not None: + try: + temp = float(temperature) + if 0 <= temp <= 1: + payload["promptObject"]["temperature"] = temp + except (ValueError, TypeError): + pass + + return payload + +def prepare_tts_payload(model, input_text, voice, speed=None, format=None): + """ + Подготавливает данные для запроса к API генерации речи из текста + + Args: + model: Модель для генерации речи + input_text: Текст для озвучивания + voice: Голос для озвучивания + speed: Скорость речи (опционально) + format: Формат аудиофайла (опционально) + + Returns: + dict: Данные для запроса + """ + # Определяем правильный тип запроса и структуру данных + payload = { + "type": "TEXT_TO_SPEECH", + "model": model, + "promptObject": { + "text": input_text, + "voice": voice + } + } + + # Добавляем дополнительные параметры, если они указаны + if speed is not None: + try: + spd = float(speed) + if 0.25 <= spd <= 4.0: + payload["promptObject"]["speed"] = spd + except (ValueError, TypeError): + pass + + if format and format in ["mp3", "opus", "aac", "flac"]: + payload["promptObject"]["response_format"] = format + + logger.debug(f"Подготовлен TTS payload: {json.dumps(payload, ensure_ascii=False)}") + + return payload + diff --git a/routes/functions/file_func.py b/routes/functions/file_func.py new file mode 100644 index 0000000..53190f1 --- /dev/null +++ b/routes/functions/file_func.py @@ -0,0 +1,344 @@ +# routes/functions/file_func.py + +from utils.imports import * +from utils.logger import logger +from utils.constants import * +from utils.common import ( + ERROR_HANDLER, + create_session, + api_request, + set_response_headers +) +from utils.memcached import safe_memcached_operation +from flask import jsonify, make_response + +#=======================================================# +# ----------- Функции для работы с файлами -------------# +#=======================================================# + +def get_user_files(api_key, request_id=None): + """ + Получает список файлов пользователя из Memcached + + Args: + api_key: API ключ пользователя + request_id: ID запроса для логирования + + Returns: + list: Список файлов пользователя + """ + user_files = [] + try: + user_key = f"user:{api_key}" + user_files_json = safe_memcached_operation('get', user_key) + + if user_files_json: + if isinstance(user_files_json, str): + user_files = json.loads(user_files_json) + elif isinstance(user_files_json, bytes): + user_files = json.loads(user_files_json.decode('utf-8')) + else: + user_files = user_files_json + + logger.debug(f"[{request_id}] Found {len(user_files)} files for user") + except Exception as e: + logger.error(f"[{request_id}] Error getting user files: {str(e)}") + + return user_files + +def save_user_files(api_key, files, request_id=None): + """ + Сохраняет список файлов пользователя в Memcached + + Args: + api_key: API ключ пользователя + files: Список файлов для сохранения + request_id: ID запроса для логирования + """ + try: + user_key = f"user:{api_key}" + safe_memcached_operation('set', user_key, json.dumps(files)) + logger.debug(f"[{request_id}] Saved {len(files)} files for user") + + # Добавляем пользователя в список известных пользователей + known_users = safe_memcached_operation('get', 'known_users_list') or [] + if isinstance(known_users, str): + known_users = json.loads(known_users) + elif isinstance(known_users, bytes): + 
known_users = json.loads(known_users.decode('utf-8')) + + if api_key not in known_users: + known_users.append(api_key) + safe_memcached_operation('set', 'known_users_list', json.dumps(known_users)) + logger.debug(f"[{request_id}] Added user to known_users_list") + except Exception as e: + logger.error(f"[{request_id}] Error saving user files: {str(e)}") + +def upload_asset(file_data, filename, mime_type, api_key, request_id=None, file_type=None): + """ + Загружает файл в 1min.ai + + Args: + file_data: Бинарные данные файла + filename: Имя файла + mime_type: MIME тип файла + api_key: API ключ пользователя + request_id: ID запроса для логирования + file_type: Тип файла (DOC/DOCX) для специальной обработки + + Returns: + tuple: (asset_id, asset_path, error_response) + """ + # Create the session before the try block so the finally clause can always close it + session = create_session() + try: + headers = {"API-KEY": api_key} + + if file_type: + headers["X-File-Type"] = file_type + + files = {"asset": (filename, file_data, mime_type)} + + response = session.post(ONE_MIN_ASSET_URL, files=files, headers=headers) + logger.debug(f"[{request_id}] Asset upload response status code: {response.status_code}") + + if response.status_code != 200: + error = response.json().get("error", "Failed to upload asset") + return None, None, (jsonify({"error": error}), response.status_code) + + response_data = response.json() + + # Извлекаем ID и путь файла + asset_id = None + asset_path = None + + if "id" in response_data: + asset_id = response_data["id"] + elif "fileContent" in response_data: + if "id" in response_data["fileContent"]: + asset_id = response_data["fileContent"]["id"] + elif "uuid" in response_data["fileContent"]: + asset_id = response_data["fileContent"]["uuid"] + + if "path" in response_data["fileContent"]: + asset_path = response_data["fileContent"]["path"] + + if not asset_id and not asset_path: + return None, None, (jsonify({"error": "Could not extract asset information"}), 500) + + logger.debug(f"[{request_id}] Successfully uploaded asset: id={asset_id}, path={asset_path}") + return asset_id, asset_path, None + + except Exception as e: + logger.error(f"[{request_id}] Error uploading asset: {str(e)}") + return None, None, (jsonify({"error": str(e)}), 500) + finally: + session.close() + +def get_mime_type(filename): + """ + Определяет MIME тип файла по расширению + + Args: + filename: Имя файла + + Returns: + tuple: (mime_type, file_type) + file_type будет None для всех файлов кроме DOC/DOCX + """ + extension = os.path.splitext(filename)[1].lower() + + mime_types = { + ".pdf": ("application/pdf", None), + ".txt": ("text/plain", None), + ".doc": ("application/msword", "DOC"), + ".docx": ("application/vnd.openxmlformats-officedocument.wordprocessingml.document", "DOCX"), + ".csv": ("text/csv", None), + ".xls": ("application/vnd.ms-excel", None), + ".xlsx": ("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", None), + ".json": ("application/json", None), + ".md": ("text/markdown", None), + ".html": ("text/html", None), + ".htm": ("text/html", None), + ".xml": ("application/xml", None), + ".pptx": ("application/vnd.openxmlformats-officedocument.presentationml.presentation", None), + ".ppt": ("application/vnd.ms-powerpoint", None), + ".rtf": ("application/rtf", None), + ".png": ("image/png", None), + ".jpg": ("image/jpeg", None), + ".jpeg": ("image/jpeg", None), + ".gif": ("image/gif", None), + ".webp": ("image/webp", None), + ".mp3": ("audio/mpeg", None), + ".wav": ("audio/wav", None), + ".ogg": ("audio/ogg", None), + } + + return mime_types.get(extension, 
("application/octet-stream", None)) + +def format_file_response(file_info, file_id=None, purpose="assistants", status="processed"): + """ + Форматирует информацию о файле в формат OpenAI API + + Args: + file_info: Словарь с информацией о файле + file_id: ID файла (если не указан в file_info) + purpose: Назначение файла + status: Статус обработки файла + + Returns: + dict: Информация о файле в формате OpenAI API + """ + # Если file_info не предоставлен, создаем пустой словарь + if file_info is None: + file_info = {} + + # Устанавливаем значения по умолчанию, если не указаны + file_id = file_info.get("id", file_id) + filename = file_info.get("filename", f"file_{file_id}") + bytes_size = file_info.get("bytes", 0) + created_at = file_info.get("created_at", int(time.time())) + + return { + "id": file_id, + "object": "file", + "bytes": bytes_size, + "created_at": created_at, + "filename": filename, + "purpose": purpose, + "status": status + } + +def create_api_response(data, request_id=None): + """ + Создает HTTP-ответ с правильными заголовками + + Args: + data: Данные для ответа + request_id: ID запроса для логирования + + Returns: + Response: HTTP-ответ + """ + response = make_response(jsonify(data)) + set_response_headers(response) + return response + +def find_file_by_id(user_files, file_id): + """ + Находит файл в списке файлов пользователя по ID + + Args: + user_files: Список файлов пользователя + file_id: ID искомого файла + + Returns: + dict/None: Информация о файле или None, если файл не найден + """ + for file_item in user_files: + if file_item.get("id") == file_id: + return file_item + return None + +def find_conversation_id(response_data, request_id=None): + """ + Ищет ID разговора в ответе API + + Args: + response_data: Данные ответа от API + request_id: ID запроса для логирования + + Returns: + str/None: ID разговора или None, если не найден + """ + try: + # Сначала проверяем наиболее вероятные места + if "conversation" in response_data and "uuid" in response_data["conversation"]: + return response_data["conversation"]["uuid"] + elif "id" in response_data: + return response_data["id"] + elif "uuid" in response_data: + return response_data["uuid"] + + # Если не нашли, выполняем рекурсивный поиск + def search_recursively(obj, path=""): + if isinstance(obj, dict): + if "id" in obj: + logger.debug(f"[{request_id}] Found ID at path '{path}.id': {obj['id']}") + return obj["id"] + if "uuid" in obj: + logger.debug(f"[{request_id}] Found UUID at path '{path}.uuid': {obj['uuid']}") + return obj["uuid"] + + for key, value in obj.items(): + result = search_recursively(value, f"{path}.{key}") + if result: + return result + elif isinstance(obj, list): + for i, item in enumerate(obj): + result = search_recursively(item, f"{path}[{i}]") + if result: + return result + return None + + return search_recursively(response_data) + except Exception as e: + logger.error(f"[{request_id}] Error finding conversation ID: {str(e)}") + return None + +def create_conversation_with_files(file_ids, title, model, api_key, request_id=None): + """ + Creates a new conversation with files + + Args: + file_ids: List of file IDs + title: The name of the conversation + model: AI model + api_key: API Key + request_id: Request ID for logging + + Returns: + str: Conversation ID or None in case of error + """ + request_id = request_id or str(uuid.uuid4())[:8] + logger.info(f"[{request_id}] Creating conversation with {len(file_ids)} files") + + try: + # Формируем payload для запроса + payload = { + "title": title, + "type": 
"CHAT_WITH_PDF", + "model": model, + "fileIds": file_ids, + } + + logger.debug(f"[{request_id}] Conversation payload: {json.dumps(payload)}") + + # Используем правильный URL API + conversation_url = "https://api.1min.ai/api/features/conversations?type=CHAT_WITH_PDF" + + logger.debug(f"[{request_id}] Creating conversation using URL: {conversation_url}") + + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + response = api_request("POST", conversation_url, json=payload, headers=headers) + + logger.debug(f"[{request_id}] Create conversation response status: {response.status_code}") + + if response.status_code != 200: + logger.error(f"[{request_id}] Failed to create conversation: {response.status_code} - {response.text}") + return None + + response_data = response.json() + logger.debug(f"[{request_id}] Conversation response data: {json.dumps(response_data)}") + + # Ищем ID разговора в разных местах ответа + conversation_id = find_conversation_id(response_data, request_id) + + if not conversation_id: + logger.error(f"[{request_id}] Could not find conversation ID in response: {response_data}") + return None + + logger.info(f"[{request_id}] Conversation created successfully: {conversation_id}") + return conversation_id + except Exception as e: + logger.error(f"[{request_id}] Error creating conversation: {str(e)}") + return None diff --git a/routes/functions/img_func.py b/routes/functions/img_func.py new file mode 100644 index 0000000..76cf690 --- /dev/null +++ b/routes/functions/img_func.py @@ -0,0 +1,650 @@ +# version 1.0.1 #increment every time you make changes +# routes/functions/img_func.py + +from utils.imports import * +from utils.logger import logger +from utils.constants import * +from utils.common import ( + ERROR_HANDLER, + handle_options_request, + set_response_headers, + create_session, + api_request, + safe_temp_file, + calculate_token +) +from utils.memcached import safe_memcached_operation +from routes.functions.shared_func import extract_image_urls, get_full_url +from flask import jsonify, request +import math +import base64 +import traceback + +from .file_func import upload_asset, get_mime_type +from .shared_func import format_image_response + +#===========================================================# +# ----------- Функции для работы с изображениями -----------# +#===========================================================# + +def build_generation_payload(model, prompt, request_data, negative_prompt, aspect_ratio, size, mode, request_id): + """Build payload for image generation based on model.""" + payload = {} + if model == "dall-e-3": + # Проверяем, входит ли размер в список разрешенных для DALL-E 3 + gen_size = size or request_data.get("size", "1024x1024") + if gen_size not in DALLE3_SIZES: + logger.warning(f"[{request_id}] Размер {gen_size} не входит в список разрешенных для DALL-E 3. Используем {DALLE3_SIZES[0]}") + gen_size = DALLE3_SIZES[0] + + payload = { + "type": "IMAGE_GENERATOR", + "model": "dall-e-3", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 1), + "size": gen_size, + "quality": request_data.get("quality", "standard"), + "style": request_data.get("style", "vivid"), + }, + } + elif model == "dall-e-2": + # Проверяем, входит ли размер в список разрешенных для DALL-E 2 + gen_size = size or request_data.get("size", "1024x1024") + if gen_size not in DALLE2_SIZES: + logger.warning(f"[{request_id}] Размер {gen_size} не входит в список разрешенных для DALL-E 2. 
Используем {DALLE2_SIZES[0]}") + gen_size = DALLE2_SIZES[0] + + payload = { + "type": "IMAGE_GENERATOR", + "model": "dall-e-2", + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 1), + "size": gen_size, + }, + } + elif model == "stable-diffusion-xl-1024-v1-0": + payload = { + "type": "IMAGE_GENERATOR", + "model": "stable-diffusion-xl-1024-v1-0", + "promptObject": { + "prompt": prompt, + "samples": request_data.get("n", 1), + "size": size or request_data.get("size", "1024x1024"), + "cfg_scale": request_data.get("cfg_scale", 7), + "clip_guidance_preset": request_data.get("clip_guidance_preset", "NONE"), + "seed": request_data.get("seed", 0), + "steps": request_data.get("steps", 30), + }, + } + elif model in ["midjourney", "midjourney_6_1"]: + # Parse aspect ratio parts (default 1:1) + try: + ar_parts = tuple(map(int, aspect_ratio.split(":"))) if aspect_ratio else (1, 1) + except Exception: + ar_parts = (1, 1) + model_name = "midjourney" if model == "midjourney" else "midjourney_6_1" + payload = { + "type": "IMAGE_GENERATOR", + "model": model_name, + "promptObject": { + "prompt": prompt, + "mode": mode or request_data.get("mode", "fast"), + "n": 4, + "aspect_width": ar_parts[0], + "aspect_height": ar_parts[1], + "isNiji6": request_data.get("isNiji6", False), + "maintainModeration": request_data.get("maintainModeration", True), + "image_weight": request_data.get("image_weight", 1), + "weird": request_data.get("weird", 0), + }, + } + if negative_prompt or request_data.get("negativePrompt"): + payload["promptObject"]["negativePrompt"] = negative_prompt or request_data.get("negativePrompt", "") + if request_data.get("no", ""): + payload["promptObject"]["no"] = request_data.get("no", "") + elif model in ["black-forest-labs/flux-schnell", "flux-schnell", + "black-forest-labs/flux-dev", "flux-dev", + "black-forest-labs/flux-pro", "flux-pro", + "black-forest-labs/flux-1.1-pro", "flux-1.1-pro"]: + # Всегда добавляем префикс black-forest-labs/ для моделей flux, если его нет + model_name = model + if not model.startswith("black-forest-labs/"): + model_name = f"black-forest-labs/{model}" + logger.debug(f"[{request_id}] Добавлен префикс к модели Flux: {model_name}") + + payload = { + "type": "IMAGE_GENERATOR", + "model": model_name, + "promptObject": { + "prompt": prompt, + "num_outputs": request_data.get("n", 1), + "aspect_ratio": aspect_ratio or request_data.get("aspect_ratio", "1:1"), + "output_format": request_data.get("output_format", "webp"), + }, + } + elif model in [ + "6b645e3a-d64f-4341-a6d8-7a3690fbf042", "phoenix", + "b24e16ff-06e3-43eb-8d33-4416c2d75876", "lightning-xl", + "5c232a9e-9061-4777-980a-ddc8e65647c6", "vision-xl", + "e71a1c2f-4f80-4800-934f-2c68979d8cc8", "anime-xl", + "1e60896f-3c26-4296-8ecc-53e2afecc132", "diffusion-xl", + "aa77f04e-3eec-4034-9c07-d0f619684628", "kino-xl", + "2067ae52-33fd-4a82-bb92-c2c55e7d2786", "albedo-base-xl" + ]: + payload = { + "type": "IMAGE_GENERATOR", + "model": model, + "promptObject": { + "prompt": prompt, + "n": request_data.get("n", 4), + "size": size, + "negativePrompt": negative_prompt or request_data.get("negativePrompt", ""), + }, + } + # Удаляем пустые параметры + if not payload["promptObject"]["negativePrompt"]: + del payload["promptObject"]["negativePrompt"] + if model == "e71a1c2f-4f80-4800-934f-2c68979d8cc8": + payload["promptObject"]["size"] = size or request_data.get("size", "1024x1024") + payload["promptObject"]["aspect_ratio"] = aspect_ratio + if not payload["promptObject"]["aspect_ratio"]: + del 
payload["promptObject"]["aspect_ratio"] + else: + logger.error(f"[{request_id}] Invalid model: {model}") + return None, ERROR_HANDLER(1002, model) + return payload, None + +def parse_aspect_ratio(prompt, model, request_data, request_id=None): + """ + Parse aspect ratio, size and other parameters from the prompt. + Enhanced version combining functionality from both implementations. + Returns: (modified prompt, aspect_ratio, size, error_message, mode) + """ + original_prompt = prompt + mode = None + size = request_data.get("size", "1024x1024") + aspect_ratio = None + ar_error = None + + try: + # Extract mode parameter (--fast or --relax) + mode_match = re.search(r'(--|\u2014)(fast|relax)\s*', prompt) + if mode_match: + mode = mode_match.group(2) + prompt = re.sub(r'(--|\u2014)(fast|relax)\s*', '', prompt).strip() + logger.debug(f"[{request_id}] Extracted mode: {mode}") + + # Extract size parameter + size_match = re.search(r'(--|\u2014)size\s+(\d+x\d+)', prompt) + if size_match: + size = size_match.group(2) + prompt = re.sub(r'(--|\u2014)size\s+\d+x\d+\s*', '', prompt).strip() + logger.debug(f"[{request_id}] Extracted size: {size}") + + # Extract aspect ratio from prompt + ar_match = re.search(r'(--|\u2014)ar\s+(\d+):(\d+)', prompt) + if ar_match: + width = int(ar_match.group(2)) + height = int(ar_match.group(3)) + + # Validate aspect ratio + if width <= 0 or height <= 0: + logger.error(f"[{request_id}] Invalid aspect ratio: {width}:{height}") + return original_prompt, None, size, "Aspect ratio dimensions must be positive", mode + + # Check aspect ratio limits + if max(width, height) / min(width, height) > 2: + ar_error = "Aspect ratio cannot exceed 2:1 or 1:2" + logger.error(f"[{request_id}] Invalid aspect ratio: {width}:{height} - {ar_error}") + return prompt, None, size, ar_error, mode + + if width > 10000 or height > 10000: + ar_error = "Aspect ratio values must be between 1 and 10000" + logger.error(f"[{request_id}] Invalid aspect ratio values: {width}:{height} - {ar_error}") + return prompt, None, size, ar_error, mode + + # Simplify aspect ratio if needed + if width > 10 or height > 10: + gcd_val = math.gcd(width, height) + width = width // gcd_val + height = height // gcd_val + + aspect_ratio = f"{width}:{height}" + prompt = re.sub(r'(--|\u2014)ar\s+\d+:\d+\s*', '', prompt).strip() + logger.debug(f"[{request_id}] Extracted aspect ratio: {aspect_ratio}") + + # Проверяем, входит ли соотношение сторон в разрешенные для модели + if model in ["midjourney", "midjourney_6_1"] and aspect_ratio not in MIDJOURNEY_ALLOWED_ASPECT_RATIOS: + ar_error = f"Аспектное соотношение {aspect_ratio} не поддерживается для модели {model}. Разрешенные значения: {', '.join(MIDJOURNEY_ALLOWED_ASPECT_RATIOS)}" + logger.error(f"[{request_id}] {ar_error}") + return prompt, None, size, ar_error, mode + + # Проверяем для моделей Flux + if (model.startswith("flux") or model.startswith("black-forest-labs/flux")) and aspect_ratio not in FLUX_ALLOWED_ASPECT_RATIOS: + ar_error = f"Аспектное соотношение {aspect_ratio} не поддерживается для модели {model}. 
+def parse_aspect_ratio(prompt, model, request_data, request_id=None):
+    """
+    Parse aspect ratio, size and other parameters from the prompt.
+    Enhanced version combining functionality from both implementations.
+    Returns: (modified prompt, aspect_ratio, size, error_message, mode)
+    """
+    original_prompt = prompt
+    mode = None
+    size = request_data.get("size", "1024x1024")
+    aspect_ratio = None
+    ar_error = None
+
+    try:
+        # Extract mode parameter (--fast or --relax)
+        mode_match = re.search(r'(--|\u2014)(fast|relax)\s*', prompt)
+        if mode_match:
+            mode = mode_match.group(2)
+            prompt = re.sub(r'(--|\u2014)(fast|relax)\s*', '', prompt).strip()
+            logger.debug(f"[{request_id}] Extracted mode: {mode}")
+
+        # Extract size parameter
+        size_match = re.search(r'(--|\u2014)size\s+(\d+x\d+)', prompt)
+        if size_match:
+            size = size_match.group(2)
+            prompt = re.sub(r'(--|\u2014)size\s+\d+x\d+\s*', '', prompt).strip()
+            logger.debug(f"[{request_id}] Extracted size: {size}")
+
+        # Extract aspect ratio from prompt
+        ar_match = re.search(r'(--|\u2014)ar\s+(\d+):(\d+)', prompt)
+        if ar_match:
+            width = int(ar_match.group(2))
+            height = int(ar_match.group(3))
+
+            # Validate aspect ratio
+            if width <= 0 or height <= 0:
+                logger.error(f"[{request_id}] Invalid aspect ratio: {width}:{height}")
+                return original_prompt, None, size, "Aspect ratio dimensions must be positive", mode
+
+            # Check aspect ratio limits
+            if max(width, height) / min(width, height) > 2:
+                ar_error = "Aspect ratio cannot exceed 2:1 or 1:2"
+                logger.error(f"[{request_id}] Invalid aspect ratio: {width}:{height} - {ar_error}")
+                return prompt, None, size, ar_error, mode
+
+            if width > 10000 or height > 10000:
+                ar_error = "Aspect ratio values must be between 1 and 10000"
+                logger.error(f"[{request_id}] Invalid aspect ratio values: {width}:{height} - {ar_error}")
+                return prompt, None, size, ar_error, mode
+
+            # Simplify aspect ratio if needed
+            if width > 10 or height > 10:
+                gcd_val = math.gcd(width, height)
+                width = width // gcd_val
+                height = height // gcd_val
+
+            aspect_ratio = f"{width}:{height}"
+            prompt = re.sub(r'(--|\u2014)ar\s+\d+:\d+\s*', '', prompt).strip()
+            logger.debug(f"[{request_id}] Extracted aspect ratio: {aspect_ratio}")
+
+            # Check whether the aspect ratio is allowed for the model
+            if model in ["midjourney", "midjourney_6_1"] and aspect_ratio not in MIDJOURNEY_ALLOWED_ASPECT_RATIOS:
+                ar_error = f"Aspect ratio {aspect_ratio} is not supported for model {model}. Allowed values: {', '.join(MIDJOURNEY_ALLOWED_ASPECT_RATIOS)}"
+                logger.error(f"[{request_id}] {ar_error}")
+                return prompt, None, size, ar_error, mode
+
+            # Check for Flux models
+            if (model.startswith("flux") or model.startswith("black-forest-labs/flux")) and aspect_ratio not in FLUX_ALLOWED_ASPECT_RATIOS:
+                ar_error = f"Aspect ratio {aspect_ratio} is not supported for model {model}. Allowed values: {', '.join(FLUX_ALLOWED_ASPECT_RATIOS)}"
+                logger.error(f"[{request_id}] {ar_error}")
+                return prompt, None, size, ar_error, mode
+
+        # Check for aspect ratio in request data
+        elif "aspect_ratio" in request_data:
+            aspect_ratio = request_data.get("aspect_ratio")
+            if not re.match(r'^\d+:\d+$', aspect_ratio):
+                ar_error = "Aspect ratio must be in format width:height"
+                logger.error(f"[{request_id}] Invalid aspect ratio format: {aspect_ratio} - {ar_error}")
+                return prompt, None, size, ar_error, mode
+            width, height = map(int, aspect_ratio.split(':'))
+            if max(width, height) / min(width, height) > 2:
+                ar_error = "Aspect ratio cannot exceed 2:1 or 1:2"
+                logger.error(f"[{request_id}] Invalid aspect ratio: {width}:{height} - {ar_error}")
+                return prompt, None, size, ar_error, mode
+            if width < 1 or width > 10000 or height < 1 or height > 10000:
+                ar_error = "Aspect ratio values must be between 1 and 10000"
+                logger.error(f"[{request_id}] Invalid aspect ratio values: {width}:{height} - {ar_error}")
+                return prompt, None, size, ar_error, mode
+            logger.debug(f"[{request_id}] Using aspect ratio from request: {aspect_ratio}")
+
+            # Check whether the aspect ratio is allowed for the model
+            if model in ["midjourney", "midjourney_6_1"] and aspect_ratio not in MIDJOURNEY_ALLOWED_ASPECT_RATIOS:
+                ar_error = f"Aspect ratio {aspect_ratio} is not supported for model {model}. Allowed values: {', '.join(MIDJOURNEY_ALLOWED_ASPECT_RATIOS)}"
+                logger.error(f"[{request_id}] {ar_error}")
+                return prompt, None, size, ar_error, mode
+
+            # Check for Flux models
+            if (model.startswith("flux") or model.startswith("black-forest-labs/flux")) and aspect_ratio not in FLUX_ALLOWED_ASPECT_RATIOS:
+                ar_error = f"Aspect ratio {aspect_ratio} is not supported for model {model}. Allowed values: {', '.join(FLUX_ALLOWED_ASPECT_RATIOS)}"
+                logger.error(f"[{request_id}] {ar_error}")
+                return prompt, None, size, ar_error, mode
+
+        # Remove negative prompt parameters
+        prompt = re.sub(r'(--|\u2014)no\s+.*?(?=(--|\u2014)|$)', '', prompt).strip()
+
+        # Handle special case for dall-e-3 which doesn't support custom aspect ratio
+        if model == "dall-e-3" and aspect_ratio:
+            width, height = map(int, aspect_ratio.split(':'))
+            if abs(width / height - 1) < 0.1:
+                size = "1024x1024"
+                aspect_ratio = "square"
+            elif width > height:
+                size = "1792x1024"
+                aspect_ratio = "landscape"
+            else:
+                size = "1024x1792"
+                aspect_ratio = "portrait"
+            logger.debug(f"[{request_id}] Adjusted size for DALL-E 3: {size}, aspect_ratio: {aspect_ratio}")
+        # Special adjustments for Leonardo models
+        elif model in [
+            "6b645e3a-d64f-4341-a6d8-7a3690fbf042", "phoenix",
+            "b24e16ff-06e3-43eb-8d33-4416c2d75876", "lightning-xl",
+            "5c232a9e-9061-4777-980a-ddc8e65647c6", "vision-xl",
+            "e71a1c2f-4f80-4800-934f-2c68979d8cc8", "anime-xl",
+            "1e60896f-3c26-4296-8ecc-53e2afecc132", "diffusion-xl",
+            "aa77f04e-3eec-4034-9c07-d0f619684628", "kino-xl",
+            "2067ae52-33fd-4a82-bb92-c2c55e7d2786", "albedo-base-xl"
+        ] and aspect_ratio:
+            if aspect_ratio == "1:1":
+                size = LEONARDO_SIZES["1:1"]
+            elif aspect_ratio == "4:3":
+                size = LEONARDO_SIZES["4:3"]
+            elif aspect_ratio == "3:4":
+                size = LEONARDO_SIZES["3:4"]
+            else:
+                width, height = map(int, aspect_ratio.split(':'))
+                ratio = width / height
+                if abs(ratio - 1) < 0.1:
+                    size = LEONARDO_SIZES["1:1"]
+                    aspect_ratio = "1:1"
+                elif ratio > 1:
+                    size = LEONARDO_SIZES["4:3"]
+                    aspect_ratio = "4:3"
+                else:
+                    size = LEONARDO_SIZES["3:4"]
+                    aspect_ratio = "3:4"
+            logger.debug(f"[{request_id}] Adjusted size for Leonardo model: {size}, aspect_ratio: {aspect_ratio}")
+
+        return prompt, aspect_ratio, size, ar_error, mode
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Error parsing aspect ratio: {str(e)}")
+        return original_prompt, None, size, f"Error parsing parameters: {str(e)}", mode
+
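The Midjourney-style flag syntax handled above (`--ar`, `--size`, `--fast`/`--relax`, with `\u2014` accepted as the em-dash variant of `--`) can be exercised in isolation with the stdlib; this sketch only restates the same regex patterns used by `parse_aspect_ratio`:

```python
import re

prompt = "a red fox in the snow --ar 16:9 --size 512x512 --fast"

mode = re.search(r'(--|\u2014)(fast|relax)\s*', prompt)
size = re.search(r'(--|\u2014)size\s+(\d+x\d+)', prompt)
ar = re.search(r'(--|\u2014)ar\s+(\d+):(\d+)', prompt)

print(mode.group(2))                    # fast
print(size.group(2))                    # 512x512
print(f"{ar.group(2)}:{ar.group(3)}")   # 16:9

# Strip the flags so only the text prompt remains, as the function does
clean = prompt
for pat in (r'(--|\u2014)(fast|relax)\s*',
            r'(--|\u2014)size\s+\d+x\d+\s*',
            r'(--|\u2014)ar\s+\d+:\d+\s*'):
    clean = re.sub(pat, '', clean).strip()
print(clean)                            # a red fox in the snow
```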
+def create_image_variations(image_url, user_model, n, aspect_width=None, aspect_height=None, mode=None, request_id=None):
+    """
+    Generate variations of the uploaded image using the 1min.ai API.
+    Enhanced version combining functionality from both implementations.
+
+    Args:
+        image_url: URL of the uploaded image
+        user_model: Requested model name
+        n: Number of variations to generate
+        aspect_width: Width for aspect ratio (optional)
+        aspect_height: Height for aspect ratio (optional)
+        mode: Generation mode (optional)
+        request_id: Request ID for logging
+
+    Returns:
+        list: Image URLs of the generated variations or tuple (response, status_code) in case of error
+    """
+    # Set request_id if not provided
+    if request_id is None:
+        request_id = str(uuid.uuid4())[:8]
+
+    variation_urls = []
+    current_model = None
+
+    # Try to get saved generation parameters from memcached
+    generation_params = None
+    try:
+        gen_key = f"gen_params:{request_id}"
+        params_json = safe_memcached_operation('get', gen_key)
+        if params_json:
+            if isinstance(params_json, str):
+                generation_params = json.loads(params_json)
+            elif isinstance(params_json, bytes):
+                generation_params = json.loads(params_json.decode('utf-8'))
+            logger.debug(f"[{request_id}] Retrieved generation parameters from memcached: {generation_params}")
+    except Exception as e:
+        logger.error(f"[{request_id}] Error retrieving generation parameters: {str(e)}")
+
+    # Use saved parameters if available
+    if generation_params:
+        if "aspect_width" in generation_params and "aspect_height" in generation_params:
+            aspect_width = generation_params.get("aspect_width")
+            aspect_height = generation_params.get("aspect_height")
+            logger.debug(f"[{request_id}] Using saved aspect ratio: {aspect_width}:{aspect_height}")
+        if "mode" in generation_params:
+            mode = generation_params.get("mode")
+            logger.debug(f"[{request_id}] Using saved mode: {mode}")
+
+    # Determine which models to try for variations
+    variation_models = []
+    if user_model in VARIATION_SUPPORTED_MODELS:
+        variation_models.append(user_model)
+    # Add fallback models
+    variation_models.extend([m for m in ["midjourney_6_1", "midjourney", "clipdrop", "dall-e-2"] if m != user_model])
+    variation_models = list(dict.fromkeys(variation_models))
+    logger.info(f"[{request_id}] Trying image variations with models: {variation_models}")
+
+    try:
+        # Get API key from request
+        auth_header = request.headers.get("Authorization", "")
+        api_key = auth_header.replace("Bearer ", "") if auth_header.startswith("Bearer ") else ""
+
+        if not api_key:
+            logger.error(f"[{request_id}] No API key provided for variation")
+            return None
+
+        headers = {"API-KEY": api_key, "Content-Type": "application/json"}
+        session = create_session()
+
+        try:
+            # Download the image from the URL
+            image_response = session.get(image_url, stream=True, timeout=MIDJOURNEY_TIMEOUT)
+            if image_response.status_code != 200:
+                logger.error(f"[{request_id}] Failed to download image: {image_response.status_code}")
+                return jsonify({"error": "Failed to download image"}), 500
+
+            # Try each model in sequence
+            for model in variation_models:
+                current_model = model
+                logger.info(f"[{request_id}] Trying model: {model} for image variations")
+
+                try:
+                    # Determine MIME type and extension
+                    content_type = "image/png"
+                    if "content-type" in image_response.headers:
+                        content_type = image_response.headers["content-type"]
+                    elif image_url.lower().endswith(".webp"):
+                        content_type = "image/webp"
+                    elif image_url.lower().endswith(".jpg") or image_url.lower().endswith(".jpeg"):
+                        content_type = "image/jpeg"
+                    elif image_url.lower().endswith(".gif"):
+                        content_type = "image/gif"
+
+                    ext = "png"
+                    if "webp" in content_type:
+                        ext = "webp"
+                    elif "jpeg" in content_type or "jpg" in content_type:
+                        ext = "jpg"
+                    elif "gif" in content_type:
+                        ext = "gif"
+                    logger.debug(f"[{request_id}] Detected image type: {content_type}, extension: {ext}")
+
+                    # Upload image to server
+                    files = {"asset": (f"variation.{ext}", image_response.content, content_type)}
+                    upload_response = session.post(ONE_MIN_ASSET_URL, files=files, headers=headers)
+
+                    if upload_response.status_code != 200:
+                        logger.error(f"[{request_id}] Image upload failed: {upload_response.status_code}")
+                        continue
+
+                    upload_data = upload_response.json()
+                    logger.debug(f"[{request_id}] Asset upload response: {upload_data}")
+
+                    image_path = None
+                    if "fileContent" in upload_data and "path" in upload_data["fileContent"]:
+                        image_path = upload_data["fileContent"]["path"]
+                        if image_path.startswith('/'):
+                            image_path = image_path[1:]
+                        logger.debug(f"[{request_id}] Using relative path for variation: {image_path}")
+                    else:
+                        logger.error(f"[{request_id}] Could not extract image path from upload response")
+                        continue
+
+                    # Create model-specific payload
+                    payload = {}
+                    if model in ["midjourney_6_1", "midjourney"]:
+                        payload = {
+                            "type": "IMAGE_VARIATOR",
+                            "model": model,
+                            "promptObject": {
+                                "imageUrl": image_path,
+                                "mode": mode or "fast",
+                                "n": 4,
+                                "isNiji6": False,
+                                "aspect_width": aspect_width or 1,
+                                "aspect_height": aspect_height or 1,
+                                "maintainModeration": True
+                            }
+                        }
+                        logger.info(f"[{request_id}] Midjourney variation payload: {json.dumps(payload['promptObject'], indent=2)}")
+                    elif model == "dall-e-2":
+                        payload = {
+                            "type": "IMAGE_VARIATOR",
+                            "model": "dall-e-2",
+                            "promptObject": {
+                                "imageUrl": image_path,
+                                "n": 1,
+                                "size": "1024x1024"
+                            }
+                        }
+                        logger.info(f"[{request_id}] DALL-E 2 variation payload: {json.dumps(payload, indent=2)}")
+
+                        # Try DALL-E 2 specific endpoint first
+                        variation_response = api_request("POST", ONE_MIN_API_URL, headers=headers, json=payload, timeout=MIDJOURNEY_TIMEOUT)
+                        if variation_response.status_code != 200:
+                            logger.error(f"[{request_id}] DALL-E 2 variation failed: {variation_response.status_code}, {variation_response.text}")
+                            continue
+
+                        variation_data = variation_response.json()
+                        if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]:
+                            result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", [])
+                            if isinstance(result_object, list):
+                                variation_urls.extend(result_object)
+                            elif isinstance(result_object, str):
+                                variation_urls.append(result_object)
+                        elif "resultObject" in variation_data:
+                            result_object = variation_data["resultObject"]
+                            if isinstance(result_object, list):
+                                variation_urls.extend(result_object)
+                            elif isinstance(result_object, str):
+                                variation_urls.append(result_object)
+
+                        if variation_urls:
+                            logger.info(f"[{request_id}] Successfully created {len(variation_urls)} variations with DALL-E 2")
+                            break
+                        else:
+                            logger.warning(f"[{request_id}] No variation URLs found in DALL-E 2 response")
+                    elif model == "clipdrop":
+                        payload = {
+                            "type": "IMAGE_VARIATOR",
+                            "model": "clipdrop",
+                            "promptObject": {
+                                "imageUrl": image_path,
+                                "n": n
+                            }
+                        }
+                        logger.info(f"[{request_id}] Clipdrop variation payload: {json.dumps(payload, indent=2)}")
+
+                        # Try Clipdrop specific endpoint
+                        variation_response = api_request("POST", ONE_MIN_API_URL, headers=headers, json=payload, timeout=MIDJOURNEY_TIMEOUT)
+                        if variation_response.status_code != 200:
+                            logger.error(f"[{request_id}] Clipdrop variation failed: {variation_response.status_code}, {variation_response.text}")
+                            continue
+
+                        variation_data = variation_response.json()
+                        if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]:
+                            result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", [])
+                            if isinstance(result_object, list):
+                                variation_urls.extend(result_object)
+                            elif isinstance(result_object, str):
+                                variation_urls.append(result_object)
+                        elif "resultObject" in variation_data:
+                            result_object = variation_data["resultObject"]
+                            if isinstance(result_object, list):
+                                variation_urls.extend(result_object)
+                            elif isinstance(result_object, str):
+                                variation_urls.append(result_object)
+
+                        if variation_urls:
+                            logger.info(f"[{request_id}] Successfully created {len(variation_urls)} variations with Clipdrop")
+                            break
+                        else:
+                            logger.warning(f"[{request_id}] No variation URLs found in Clipdrop response")
+
+                    # If we reach here for midjourney, or if previous attempts didn't succeed, try the main API endpoint
+                    if payload:
+                        timeout = MIDJOURNEY_TIMEOUT if model.startswith("midjourney") else DEFAULT_TIMEOUT
+
+                        # Make the API request
+                        variation_response = api_request("POST", ONE_MIN_API_URL, headers=headers, json=payload, timeout=timeout)
+
+                        if variation_response.status_code != 200:
+                            logger.error(f"[{request_id}] Variation request with model {model} failed: {variation_response.status_code} - {variation_response.text}")
+                            # On a Gateway Timeout (504) error, return the error immediately instead of continuing with fallbacks
+                            if variation_response.status_code == 504:
+                                logger.error(f"[{request_id}] Midjourney API timeout (504). Returning error to client instead of fallback.")
+                                return jsonify({
+                                    "error": "Gateway Timeout (504) occurred while processing image variation request. Try again later."
+                                }), 504
+                            continue
+
+                        # Process the response
+                        variation_data = variation_response.json()
+
+                        # Extract variation URLs from response
+                        if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]:
+                            result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", [])
+                            if isinstance(result_object, list):
+                                variation_urls.extend(result_object)
+                            elif isinstance(result_object, str):
+                                variation_urls.append(result_object)
+                        elif "resultObject" in variation_data:
+                            result_object = variation_data["resultObject"]
+                            if isinstance(result_object, list):
+                                variation_urls.extend(result_object)
+                            elif isinstance(result_object, str):
+                                variation_urls.append(result_object)
+
+                        if variation_urls:
+                            logger.info(f"[{request_id}] Successfully created {len(variation_urls)} variations with {model}")
+                            break
+                        else:
+                            logger.warning(f"[{request_id}] No variation URLs found in response for model {model}")
+
+                except Exception as e:
+                    logger.error(f"[{request_id}] Error with model {model}: {str(e)}")
+                    continue
+
+            # Handle case where all models failed
+            if not variation_urls:
+                logger.error(f"[{request_id}] Failed to create variations with any available model")
+                return jsonify({"error": "Failed to create image variations with any available model"}), 500
+
+            # Format the successful response
+            logger.info(f"[{request_id}] Generated {len(variation_urls)} image variations with {current_model}")
+            return variation_urls
+
+        finally:
+            session.close()
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Error generating image variations: {str(e)}")
+        return jsonify({"error": str(e)}), 500
+
+def retry_image_upload(image_url, api_key, request_id=None):
+    """Uploads an image with repeated attempts; returns a direct link to it."""
+    request_id = request_id or str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Uploading image: {image_url}")
+    session = create_session()
+    temp_file_path = None
+    try:
+        if image_url.startswith(("http://", "https://")):
+            logger.debug(f"[{request_id}] Fetching image from URL: {image_url}")
+            response = session.get(image_url, stream=True)
+            response.raise_for_status()
+            image_data = response.content
+        else:
+            logger.debug(f"[{request_id}] Decoding base64 image")
+            image_data = base64.b64decode(image_url.split(",")[1])
+        if len(image_data) == 0:
+            logger.error(f"[{request_id}] Empty image data")
+            return None
+        temp_file_path = safe_temp_file("image", request_id)
+        with open(temp_file_path, "wb") as f:
+            f.write(image_data)
+        if os.path.getsize(temp_file_path) == 0:
+            logger.error(f"[{request_id}] Empty image file created: {temp_file_path}")
+            return None
+        try:
+            with open(temp_file_path, "rb") as f:
+                upload_response = session.post(
+                    ONE_MIN_ASSET_URL,
+                    headers={"API-KEY": api_key},
+                    files={"asset": (os.path.basename(image_url),
+                                     f,
+                                     "image/webp" if image_url.endswith(".webp") else "image/jpeg")}
+                )
+                if upload_response.status_code != 200:
+                    logger.error(f"[{request_id}] Upload failed with status {upload_response.status_code}: {upload_response.text}")
+                    return None
+                upload_data = upload_response.json()
+                if isinstance(upload_data, str):
+                    try:
+                        upload_data = json.loads(upload_data)
+                    except Exception:
+                        logger.error(f"[{request_id}] Failed to parse upload response: {upload_data}")
+                        return None
+                logger.debug(f"[{request_id}] Upload response: {upload_data}")
+                if "fileContent" in upload_data and "path" in upload_data["fileContent"]:
+                    url = upload_data["fileContent"]["path"]
+                    logger.info(f"[{request_id}] Image uploaded successfully: {url}")
+                    return url
+                logger.error(f"[{request_id}] No path found in upload response")
+                return None
+        except Exception as e:
+            logger.error(f"[{request_id}] Exception during image upload: {str(e)}")
+            return None
+    except Exception as e:
+        logger.error(f"[{request_id}] Exception during image processing: {str(e)}")
+        traceback.print_exc()
+        return None
+    finally:
+        session.close()
+        if temp_file_path and os.path.exists(temp_file_path):
+            try:
+                os.remove(temp_file_path)
+                logger.debug(f"[{request_id}] Removed temp file: {temp_file_path}")
+            except Exception as e:
+                logger.warning(f"[{request_id}] Failed to remove temp file {temp_file_path}: {str(e)}")
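A usage sketch for `retry_image_upload` (hypothetical key and inputs): the function accepts either an http(s) URL or a base64 data URI, which it splits on the first comma before decoding:

```python
import base64
from routes.functions.img_func import retry_image_upload

# From a public URL
path = retry_image_upload("https://example.com/cat.jpg", "<1min.ai API key>", request_id="demo1234")

# From a data URI: only the part after the comma is base64-decoded
data_uri = "data:image/png;base64," + base64.b64encode(b"<raw PNG bytes>").decode()
path = retry_image_upload(data_uri, "<1min.ai API key>")

# Returns the asset path from the upload response, or None on any failure
print(path)
```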
diff --git a/routes/functions/shared_func.py b/routes/functions/shared_func.py
new file mode 100644
index 0000000..4f2e7de
--- /dev/null
+++ b/routes/functions/shared_func.py
@@ -0,0 +1,484 @@
+# routes/functions/shared_func.py
+
+from utils.imports import *
+from utils.logger import logger
+from utils.constants import *
+from utils.common import (
+    ERROR_HANDLER,
+    handle_options_request,
+    set_response_headers,
+    create_session,
+    api_request,
+    safe_temp_file,
+    calculate_token
+)
+from utils.memcached import safe_memcached_operation
+
+#========================================================================#
+# ------- Shared functions for authorization and error handling ---------#
+#========================================================================#
+
+def validate_auth(request, request_id=None):
+    """
+    Validates the authorization of a request
+
+    Args:
+        request: Flask request object
+        request_id: Request ID for logging
+
+    Returns:
+        tuple: (api_key, error_response)
+            api_key is None if authorization failed
+    """
+    auth_header = request.headers.get("Authorization")
+    if not auth_header or not auth_header.startswith("Bearer "):
+        logger.error(f"[{request_id}] Invalid Authentication")
+        return None, ERROR_HANDLER(1021)
+
+    api_key = auth_header.split(" ")[1]
+    return api_key, None
+
+def handle_api_error(response, api_key=None, request_id=None):
+    """
+    Handles API errors
+
+    Args:
+        response: Response from the API
+        api_key: The user's API key
+        request_id: Request ID for logging
+
+    Returns:
+        tuple: (error_json, status_code)
+    """
+    if response.status_code == 401:
+        return ERROR_HANDLER(1020, key=api_key)
+
+    error_text = "Unknown error"
+    try:
+        error_json = response.json()
+        if "error" in error_json:
+            error_text = error_json["error"]
+    except Exception:
+        pass
+
+    logger.error(f"[{request_id}] API error: {response.status_code} - {error_text}")
+    return jsonify({"error": error_text}), response.status_code
+
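Taken together, the two helpers give routes a uniform prologue and epilogue. A minimal sketch of a hypothetical route using them (names such as `app`, `api_request` and `ONE_MIN_API_URL` come from this diff's utility modules):

```python
@app.route("/v1/demo", methods=["POST"])  # hypothetical endpoint for illustration
def demo():
    request_id = str(uuid.uuid4())[:8]

    # Prologue: standardized Bearer-token check
    api_key, error = validate_auth(request, request_id)
    if error:
        return error

    headers = {"API-KEY": api_key, "Content-Type": "application/json"}
    response = api_request("POST", ONE_MIN_API_URL, headers=headers, json={})

    # Epilogue: standardized error mapping (401 -> ERROR_HANDLER(1020), else passthrough)
    if response.status_code != 200:
        return handle_api_error(response, api_key, request_id)
    return jsonify(response.json())
```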
{len(full_urls)} images") + return response + +def stream_response(response, request_data, model, prompt_tokens, session=None): + """ + Стримит ответ от API в формате OpenAI + + Args: + response: Ответ от API + request_data: Данные запроса + model: Название модели + prompt_tokens: Количество токенов в запросе + session: Сессия для запросов + + Yields: + str: Строки для стриминга + """ + all_chunks = "" + session_created = False + + # Если сессия не передана, создаем новую + if not session: + session = create_session() + session_created = True + + # Отправляем первый фрагмент с ролью + first_chunk = { + "id": f"chatcmpl-{uuid.uuid4()}", + "object": "chat.completion.chunk", + "created": int(time.time()), + "model": model, + "choices": [{ + "index": 0, + "delta": {"role": "assistant"}, + "finish_reason": None + }] + } + + yield f"data: {json.dumps(first_chunk)}\n\n" + + try: + # Обрабатываем контент + for chunk in response.iter_content(chunk_size=1024): + if chunk: + return_chunk = { + "id": f"chatcmpl-{uuid.uuid4()}", + "object": "chat.completion.chunk", + "created": int(time.time()), + "model": model, + "choices": [{ + "index": 0, + "delta": {"content": chunk.decode('utf-8')}, + "finish_reason": None + }] + } + all_chunks += chunk.decode('utf-8') + yield f"data: {json.dumps(return_chunk)}\n\n" + + # Считаем токены + tokens = calculate_token(all_chunks) + + # Финальный чанк + final_chunk = { + "id": f"chatcmpl-{uuid.uuid4()}", + "object": "chat.completion.chunk", + "created": int(time.time()), + "model": model, + "choices": [{ + "index": 0, + "delta": {"content": ""}, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": prompt_tokens, + "completion_tokens": tokens, + "total_tokens": tokens + prompt_tokens + } + } + + yield f"data: {json.dumps(final_chunk)}\n\n" + yield "data: [DONE]\n\n" + + except requests.exceptions.ChunkedEncodingError: + # Обрабатываем ошибку прерванного соединения + logger.warning(f"Соединение с API прервано преждевременно. Получена только часть ответа.") + error_message = "Соединение прервано. Получена только часть ответа." 
+def stream_response(response, request_data, model, prompt_tokens, session=None):
+    """
+    Streams the API response in OpenAI format
+
+    Args:
+        response: Response from the API
+        request_data: Request data
+        model: Model name
+        prompt_tokens: Number of tokens in the prompt
+        session: Session used for the request
+
+    Yields:
+        str: Lines for streaming
+    """
+    all_chunks = ""
+    session_created = False
+
+    # Create a new session if none was passed in
+    if not session:
+        session = create_session()
+        session_created = True
+
+    # Send the first chunk carrying the assistant role
+    first_chunk = {
+        "id": f"chatcmpl-{uuid.uuid4()}",
+        "object": "chat.completion.chunk",
+        "created": int(time.time()),
+        "model": model,
+        "choices": [{
+            "index": 0,
+            "delta": {"role": "assistant"},
+            "finish_reason": None
+        }]
+    }
+
+    yield f"data: {json.dumps(first_chunk)}\n\n"
+
+    try:
+        # Stream the content
+        for chunk in response.iter_content(chunk_size=1024):
+            if chunk:
+                return_chunk = {
+                    "id": f"chatcmpl-{uuid.uuid4()}",
+                    "object": "chat.completion.chunk",
+                    "created": int(time.time()),
+                    "model": model,
+                    "choices": [{
+                        "index": 0,
+                        "delta": {"content": chunk.decode('utf-8')},
+                        "finish_reason": None
+                    }]
+                }
+                all_chunks += chunk.decode('utf-8')
+                yield f"data: {json.dumps(return_chunk)}\n\n"
+
+        # Count the tokens
+        tokens = calculate_token(all_chunks)
+
+        # Final chunk
+        final_chunk = {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion.chunk",
+            "created": int(time.time()),
+            "model": model,
+            "choices": [{
+                "index": 0,
+                "delta": {"content": ""},
+                "finish_reason": "stop"
+            }],
+            "usage": {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": tokens,
+                "total_tokens": tokens + prompt_tokens
+            }
+        }
+
+        yield f"data: {json.dumps(final_chunk)}\n\n"
+        yield "data: [DONE]\n\n"
+
+    except requests.exceptions.ChunkedEncodingError:
+        # Handle a prematurely terminated connection
+        logger.warning("API connection terminated prematurely; only part of the response was received.")
+        error_message = "Connection interrupted. Only part of the response was received."
+
+        # Count tokens for the partial response
+        tokens = calculate_token(all_chunks)
+
+        # Notify the client about the error
+        error_chunk = {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion.chunk",
+            "created": int(time.time()),
+            "model": model,
+            "choices": [{
+                "index": 0,
+                "delta": {"content": f"\n\n{error_message}"},
+                "finish_reason": "error"
+            }],
+            "usage": {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": tokens,
+                "total_tokens": tokens + prompt_tokens
+            }
+        }
+        yield f"data: {json.dumps(error_chunk)}\n\n"
+        yield "data: [DONE]\n\n"
+
+    except Exception as e:
+        # Handle any other exception
+        logger.error(f"Streaming error: {str(e)}")
+        error_message = f"Error while receiving the response: {str(e)}"
+
+        # Count tokens for the partial response
+        tokens = calculate_token(all_chunks)
+
+        # Notify the client about the error
+        error_chunk = {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion.chunk",
+            "created": int(time.time()),
+            "model": model,
+            "choices": [{
+                "index": 0,
+                "delta": {"content": f"\n\n{error_message}"},
+                "finish_reason": "error"
+            }],
+            "usage": {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": tokens,
+                "total_tokens": tokens + prompt_tokens
+            }
+        }
+        yield f"data: {json.dumps(error_chunk)}\n\n"
+        yield "data: [DONE]\n\n"
+
+    finally:
+        # Close the session if it was created inside this function
+        if session_created and session:
+            session.close()
+
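On the wire this is standard OpenAI-style SSE, so any OpenAI-compatible client can consume it. A hedged client-side sketch (assuming the relay listens on port 5001, as the index route in `routes/text.py` suggests):

```python
import json
import requests

with requests.post(
    "http://localhost:5001/v1/chat/completions",
    headers={"Authorization": "Bearer <1min.ai API key>"},
    json={"model": "mistral-nemo",
          "messages": [{"role": "user", "content": "Hello"}],
          "stream": True},
    stream=True,
) as r:
    for line in r.iter_lines():
        if not line or not line.startswith(b"data: "):
            continue
        data = line[len(b"data: "):]
        if data == b"[DONE]":
            break
        chunk = json.loads(data)
        print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)
```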
+#=======================================================#
+# -------- Functions for extracting API data -----------#
+#=======================================================#
+
+def get_full_url(url, asset_host="https://asset.1min.ai"):
+    """
+    Builds the full URL for a resource
+
+    Args:
+        url: Relative or absolute URL
+        asset_host: Base host URL
+
+    Returns:
+        str: Full URL
+    """
+    if not url.startswith("http"):
+        return f"{asset_host}{url}" if url.startswith("/") else f"{asset_host}/{url}"
+    return url
+
+def extract_data_from_api_response(response_data, request_id=None):
+    """
+    Shared helper for extracting data from a 1min.ai API response
+
+    Args:
+        response_data: Response data from the API
+        request_id: Request ID for logging
+
+    Returns:
+        object: Extracted data object or None
+    """
+    try:
+        # Check the aiRecord structure (the primary response envelope)
+        if "aiRecord" in response_data and "aiRecordDetail" in response_data["aiRecord"]:
+            result_object = response_data["aiRecord"]["aiRecordDetail"].get("resultObject", None)
+            return result_object
+
+        # Check the flat resultObject structure
+        elif "resultObject" in response_data:
+            return response_data["resultObject"]
+
+        # Nothing found
+        logger.error(f"[{request_id}] Could not extract data from API response")
+        return None
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Error extracting data from response: {str(e)}")
+        return None
+
+def extract_text_from_response(response_data, request_id=None):
+    """
+    Extracts text from an API response
+
+    Args:
+        response_data: Response data from the API
+        request_id: Request ID for logging
+
+    Returns:
+        str: Extracted text, or an empty string on error
+    """
+    result_text = ""
+
+    try:
+        # Get the data via the shared helper
+        result_object = extract_data_from_api_response(response_data, request_id)
+
+        if result_object:
+            # Handle depending on the data type
+            if isinstance(result_object, list) and result_object:
+                result_text = result_object[0]
+            elif isinstance(result_object, str):
+                result_text = result_object
+            else:
+                result_text = str(result_object)
+
+        # Check whether result_text is itself JSON
+        if result_text and isinstance(result_text, str) and result_text.strip().startswith("{"):
+            try:
+                parsed_json = json.loads(result_text)
+                if "text" in parsed_json:
+                    result_text = parsed_json["text"]
+                    logger.debug(f"[{request_id}] Extracted inner text from JSON")
+            except (json.JSONDecodeError, TypeError, ValueError):
+                pass
+
+        if not result_text:
+            logger.error(f"[{request_id}] Could not extract text from API response")
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Error extracting text from response: {str(e)}")
+
+    return result_text
+
+def extract_image_urls(response_data, request_id=None):
+    """
+    Extracts image URLs from an API response
+
+    Args:
+        response_data: Response from the API
+        request_id: Request ID for logging
+
+    Returns:
+        list: List of image URLs
+    """
+    image_urls = []
+
+    try:
+        # Get the data via the shared helper
+        result_object = extract_data_from_api_response(response_data, request_id)
+
+        if result_object:
+            # Handle depending on the data type
+            if isinstance(result_object, list):
+                image_urls.extend(result_object)
+            elif isinstance(result_object, str):
+                image_urls.append(result_object)
+
+        # Specific check for OpenAI-compatible responses
+        elif "data" in response_data and isinstance(response_data["data"], list):
+            for item in response_data["data"]:
+                if "url" in item:
+                    image_urls.append(item["url"])
+
+        logger.debug(f"[{request_id}] Extracted {len(image_urls)} image URLs")
+
+        if not image_urls:
+            logger.error(f"[{request_id}] Could not extract image URLs from API response: {json.dumps(response_data)[:500]}")
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Error extracting image URLs: {str(e)}")
+
+    return image_urls
+
+def extract_audio_url(response_data, request_id=None):
+    """
+    Extracts an audio URL from an API response
+
+    Args:
+        response_data: Response data from the API
+        request_id: Request ID for logging
+
+    Returns:
+        str: Audio URL, or an empty string on error
+    """
+    audio_url = ""
+
+    try:
+        # Get the data via the shared helper
+        result_object = extract_data_from_api_response(response_data, request_id)
+
+        if result_object:
+            # Handle depending on the data type
+            if isinstance(result_object, list) and result_object:
+                audio_url = result_object[0]
+            elif isinstance(result_object, str):
+                audio_url = result_object
+
+        if not audio_url:
+            logger.error(f"[{request_id}] Could not extract audio URL from API response")
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Error extracting audio URL from response: {str(e)}")
+
+    return audio_url
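The extractors above all normalize the same two response envelopes: the nested `aiRecord` form and the flat `resultObject` form. A quick illustration:

```python
from routes.functions.shared_func import extract_text_from_response, extract_image_urls

nested = {"aiRecord": {"aiRecordDetail": {"resultObject": ["Hello!"]}}}
flat = {"resultObject": ["images/a1.png", "images/a2.png"]}

print(extract_text_from_response(nested))  # Hello!
print(extract_image_urls(flat))            # ['images/a1.png', 'images/a2.png']
```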
diff --git a/routes/functions/txt_func.py b/routes/functions/txt_func.py
new file mode 100644
index 0000000..aff8788
--- /dev/null
+++ b/routes/functions/txt_func.py
@@ -0,0 +1,405 @@
+# routes/functions/txt_func.py
+
+from utils.imports import *
+from utils.logger import logger
+from utils.constants import *
+from utils.common import (
+    ERROR_HANDLER,
+    create_session,
+    calculate_token
+)
+import json
+
+# Import the required constants and functions from other modules;
+# create_session and stream_response are used by streaming_request below
+from .shared_func import (
+    format_openai_response,
+    stream_response
+)
+
+#=================================================================#
+# ----------- Functions for working with text models -------------#
+#=================================================================#
+
+def format_conversation_history(messages, new_input):
+    """
+    Formats the conversation history into a structured string.
+
+    Args:
+        messages (list): List of message dictionaries from the request
+        new_input (str): The new user input message
+
+    Returns:
+        str: Formatted conversation history
+    """
+    formatted_history = []
+
+    for message in messages:
+        role = message.get("role", "")
+        content = message.get("content", "")
+
+        # Handle potential list content
+        if isinstance(content, list):
+            processed_content = []
+            for item in content:
+                if "text" in item:
+                    processed_content.append(item["text"])
+            content = "\n".join(processed_content)
+
+        if role == "system":
+            formatted_history.append(f"System: {content}")
+        elif role == "user":
+            formatted_history.append(f"User: {content}")
+        elif role == "assistant":
+            formatted_history.append(f"Assistant: {content}")
+
+    # Append the new input if present
+    if new_input:
+        formatted_history.append(f"User: {new_input}")
+
+    # Return only the dialogue history, without additional instructions
+    return "\n".join(formatted_history)
+
+def get_model_capabilities(model):
+    """
+    Determines the capabilities supported by a specific model
+
+    Args:
+        model: The name of the model
+
+    Returns:
+        dict: Dictionary of feature-support flags
+    """
+    capabilities = {
+        "vision": False,
+        "code_interpreter": False,
+        "retrieval": False,
+        "function_calling": False,
+    }
+
+    # Check support for each capability against the corresponding model lists
+    capabilities["vision"] = model in VISION_SUPPORTED_MODELS
+    capabilities["code_interpreter"] = model in CODE_INTERPRETER_SUPPORTED_MODELS
+    capabilities["retrieval"] = model in RETRIEVAL_SUPPORTED_MODELS
+    capabilities["function_calling"] = model in FUNCTION_CALLING_SUPPORTED_MODELS
+
+    return capabilities
+
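A short usage sketch; which flags come back `True` depends entirely on the model lists in `utils/constants.py`:

```python
from routes.functions.txt_func import get_model_capabilities

caps = get_model_capabilities("gpt-4o")  # hypothetical model name
if caps["vision"]:
    print("model accepts images")
if caps["retrieval"]:
    print("web search can be enabled")
```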
capabilities["vision"]: + # Add instructions to the prompt field + enhanced_prompt = all_messages + if not enhanced_prompt.strip().startswith(IMAGE_DESCRIPTION_INSTRUCTION): + enhanced_prompt = f"{IMAGE_DESCRIPTION_INSTRUCTION}\n\n{all_messages}" + + payload = { + "type": "CHAT_WITH_IMAGE", + "model": model, + "promptObject": { + "prompt": enhanced_prompt, + "isMixed": False, + "imageList": image_paths, + "webSearch": web_search, + "numOfSite": num_of_site if web_search else None, + "maxWord": max_word if web_search else None, + }, + } + + if web_search: + logger.debug( + f"[{request_id}] Web search enabled in payload with numOfSite={num_of_site}, maxWord={max_word}") + else: + logger.debug( + f"[{request_id}] Model {model} does not support vision, falling back to text-only chat" + ) + payload = { + "type": "CHAT_WITH_AI", + "model": model, + "promptObject": { + "prompt": all_messages, + "isMixed": False, + "webSearch": web_search, + "numOfSite": num_of_site if web_search else None, + "maxWord": max_word if web_search else None, + }, + } + + if web_search: + logger.debug( + f"[{request_id}] Web search enabled in payload with numOfSite={num_of_site}, maxWord={max_word}") + elif code_interpreter: + # If Code_interpreter is requested and supported + payload = { + "type": "CODE_GENERATOR", + "model": model, + "conversationId": "CODE_GENERATOR", + "promptObject": {"prompt": all_messages}, + } + else: + # Basic text request + payload = { + "type": "CHAT_WITH_AI", + "model": model, + "promptObject": { + "prompt": all_messages, + "isMixed": False, + "webSearch": web_search, + "numOfSite": num_of_site if web_search else None, + "maxWord": max_word if web_search else None, + }, + } + + if web_search: + logger.debug( + f"[{request_id}] Web search enabled in payload with numOfSite={num_of_site}, maxWord={max_word}") + + return payload + +def transform_response(one_min_response, request_data, prompt_token): + try: + # Output of the response structure for debugging + logger.debug(f"Response structure: {json.dumps(one_min_response)[:200]}...") + + # We get an answer from the appropriate place to json + result_text = ( + one_min_response.get("aiRecord", {}) + .get("aiRecordDetail", {}) + .get("resultObject", [""])[0] + ) + + if not result_text: + # Alternative ways to extract an answer + if "resultObject" in one_min_response: + result_text = ( + one_min_response["resultObject"][0] + if isinstance(one_min_response["resultObject"], list) + else one_min_response["resultObject"] + ) + elif "result" in one_min_response: + result_text = one_min_response["result"] + else: + # If you have not found an answer along the well -known paths, we return the error + logger.error(f"Cannot extract response text from API result") + result_text = "Error: Could not extract response from API" + + completion_token = calculate_token(result_text) + logger.debug( + f"Finished processing Non-Streaming response. 
+def transform_response(one_min_response, request_data, prompt_token):
+    try:
+        # Log the response structure for debugging
+        logger.debug(f"Response structure: {json.dumps(one_min_response)[:200]}...")
+
+        # Extract the answer from the expected location in the JSON
+        result_text = (
+            one_min_response.get("aiRecord", {})
+            .get("aiRecordDetail", {})
+            .get("resultObject", [""])[0]
+        )
+
+        if not result_text:
+            # Alternative paths for extracting the answer
+            if "resultObject" in one_min_response:
+                result_text = (
+                    one_min_response["resultObject"][0]
+                    if isinstance(one_min_response["resultObject"], list)
+                    else one_min_response["resultObject"]
+                )
+            elif "result" in one_min_response:
+                result_text = one_min_response["result"]
+            else:
+                # If no answer was found at any known path, return an error
+                logger.error("Cannot extract response text from API result")
+                result_text = "Error: Could not extract response from API"
+
+        completion_token = calculate_token(result_text)
+        logger.debug(
+            f"Finished processing Non-Streaming response. Completion tokens: {str(completion_token)}"
+        )
+        logger.debug(f"Total tokens: {str(completion_token + prompt_token)}")
+
+        return {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion",
+            "created": int(time.time()),
+            "model": request_data.get("model", "mistral-nemo").strip(),
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": result_text,
+                    },
+                    "finish_reason": "stop",
+                }
+            ],
+            "usage": {
+                "prompt_tokens": prompt_token,
+                "completion_tokens": completion_token,
+                "total_tokens": prompt_token + completion_token,
+            },
+        }
+    except Exception as e:
+        logger.error(f"Error in transform_response: {str(e)}")
+        # Return the error in an OpenAI-compatible format
+        return {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion",
+            "created": int(time.time()),
+            "model": request_data.get("model", "mistral-nemo").strip(),
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": f"Error processing response: {str(e)}",
+                    },
+                    "finish_reason": "stop",
+                }
+            ],
+            "usage": {
+                "prompt_tokens": prompt_token,
+                "completion_tokens": 0,
+                "total_tokens": prompt_token,
+            },
+        }
+
+def emulate_stream_response(full_content, request_data, model, prompt_tokens):
+    """
+    Emulates a streaming response for cases when the API does not support streaming
+
+    Args:
+        full_content: Full text of the answer
+        request_data: Request data
+        model: Model name
+        prompt_tokens: Number of tokens in the prompt
+
+    Yields:
+        str: Lines for streaming
+    """
+    # Split the answer into chunks of ~5 words
+    words = full_content.split()
+    chunks = [" ".join(words[i: i + 5]) for i in range(0, len(words), 5)]
+
+    for chunk in chunks:
+        return_chunk = {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion.chunk",
+            "created": int(time.time()),
+            "model": model,
+            "choices": [
+                {"index": 0, "delta": {"content": chunk}, "finish_reason": None}
+            ],
+        }
+
+        yield f"data: {json.dumps(return_chunk)}\n\n"
+        time.sleep(0.05)  # Small delay to emulate streaming
+
+    # Count the tokens
+    tokens = calculate_token(full_content)
+
+    # Final chunk
+    final_chunk = {
+        "id": f"chatcmpl-{uuid.uuid4()}",
+        "object": "chat.completion.chunk",
+        "created": int(time.time()),
+        "model": model,
+        "choices": [{"index": 0, "delta": {"content": ""}, "finish_reason": "stop"}],
+        "usage": {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": tokens,
+            "total_tokens": tokens + prompt_tokens,
+        },
+    }
+
+    yield f"data: {json.dumps(final_chunk)}\n\n"
+    yield "data: [DONE]\n\n"
+
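The emulator can be driven directly, with no network I/O; each yielded item is an SSE-framed JSON chunk:

```python
from routes.functions.txt_func import emulate_stream_response

for sse in emulate_stream_response(
        "one two three four five six seven",
        request_data={}, model="mistral-nemo", prompt_tokens=7):
    print(sse, end="")
# data: {... "delta": {"content": "one two three four five"} ...}
# data: {... "delta": {"content": "six seven"} ...}
# data: {... final chunk with "usage" ...}
# data: [DONE]
```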
logger.debug(f"[{request_id}] Streaming response status code: {response_stream.status_code}") + + if response_stream.status_code != 200: + if response_stream.status_code == 401: + session.close() + return ERROR_HANDLER(1020, key=headers.get("API-KEY", "")) + + logger.error(f"[{request_id}] Error status code: {response_stream.status_code}") + try: + error_content = response_stream.json() + logger.error(f"[{request_id}] Error response: {error_content}") + except: + logger.error(f"[{request_id}] Could not parse error response as JSON") + + session.close() + return ERROR_HANDLER(response_stream.status_code) + + # Вычисляем количество токенов + prompt_token = calculate_token(payload["message"] if "message" in payload else + payload.get("promptObject", {}).get("prompt", "")) + + # Передаем сессию генератору + return Response( + stream_response(response_stream, {"model": model}, model, prompt_token, session), + content_type="text/event-stream" + ) + except Exception as e: + logger.error(f"[{request_id}] Exception during streaming request: {str(e)}") + return jsonify({"error": str(e)}), 500 diff --git a/routes/images.py b/routes/images.py new file mode 100644 index 0000000..6a5c182 --- /dev/null +++ b/routes/images.py @@ -0,0 +1,497 @@ +# routes/images.py + +# Импортируем только необходимые модули +from utils.imports import * +from utils.logger import logger +from utils.constants import * +from utils.common import ( + ERROR_HANDLER, + handle_options_request, + set_response_headers, + create_session, + api_request, + safe_temp_file, + calculate_token +) +from utils.memcached import safe_memcached_operation +from routes.functions.shared_func import validate_auth, handle_api_error, format_image_response, get_full_url, extract_image_urls +from routes.functions.img_func import build_generation_payload, parse_aspect_ratio, create_image_variations, retry_image_upload +from . 
diff --git a/routes/images.py b/routes/images.py
new file mode 100644
index 0000000..6a5c182
--- /dev/null
+++ b/routes/images.py
@@ -0,0 +1,497 @@
+# routes/images.py
+
+# Import only the required modules
+from utils.imports import *
+from utils.logger import logger
+from utils.constants import *
+from utils.common import (
+    ERROR_HANDLER,
+    handle_options_request,
+    set_response_headers,
+    create_session,
+    api_request,
+    safe_temp_file,
+    calculate_token
+)
+from utils.memcached import safe_memcached_operation
+from routes.functions.shared_func import validate_auth, handle_api_error, format_image_response, get_full_url, extract_image_urls
+from routes.functions.img_func import build_generation_payload, parse_aspect_ratio, create_image_variations, retry_image_upload
+from . import app, limiter, MEMORY_STORAGE  # Import app, limiter and MEMORY_STORAGE from the routes package
+
+# ----------------------- Endpoints -----------------------
+
+@app.route("/v1/images/generations", methods=["POST", "OPTIONS"])
+@limiter.limit("60 per minute")
+def generate_image():
+    if request.method == "OPTIONS":
+        return handle_options_request()
+    request_id = str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Received request: /v1/images/generations")
+
+    # Validate authentication (validate_auth already performs the Bearer check)
+    api_key, error = validate_auth(request, request_id)
+    if error:
+        return error
+    headers = {"API-KEY": api_key, "Content-Type": "application/json"}
+
+    if not request.is_json:
+        logger.error(f"[{request_id}] Request content-type is not application/json")
+        return jsonify({"error": "Content-type must be application/json"}), 400
+    request_data = request.get_json()
+
+    model = request_data.get("model", "dall-e-3").strip()
+    prompt = request_data.get("prompt", "").strip()
+
+    # If the request came from chat/completions, use only the user's last prompt
+    if request.environ.get("HTTP_REFERER") and "chat/completions" in request.environ.get("HTTP_REFERER"):
+        logger.debug(f"[{request_id}] Request came from chat completions, isolating the prompt")
+
+    negative_prompt = None
+    no_match = re.search(r'(--|\u2014)no\s+(.*?)(?=(--|\u2014)|$)', prompt)
+    if no_match:
+        negative_prompt = no_match.group(2).strip()
+        prompt = re.sub(r'(--|\u2014)no\s+.*?(?=(--|\u2014)|$)', '', prompt).strip()
+
+    prompt, aspect_ratio, size, ar_error, mode = parse_aspect_ratio(prompt, model, request_data, request_id)
+    if ar_error:
+        return jsonify({"error": ar_error}), 400
+
+    if not prompt:
+        messages = request_data.get("messages", [])
+        if messages:
+            last_message = messages[-1]
+            content = last_message.get("content", "")
+            if isinstance(content, str):
+                prompt = content
+            elif isinstance(content, list):
+                prompt = " ".join([item.get("text", "") for item in content if isinstance(item, dict)])
+            negative_prompt = None
+            no_match = re.search(r'(--|\u2014)no\s+(.*?)(?=(--|\u2014)|$)', prompt)
+            if no_match:
+                negative_prompt = no_match.group(2).strip()
+            prompt, aspect_ratio, size, ar_error, mode = parse_aspect_ratio(prompt, model, request_data, request_id)
+            if ar_error:
+                return jsonify({"error": ar_error}), 400
+        if not prompt:
+            logger.error(f"[{request_id}] No prompt provided")
+            return jsonify({"error": "No prompt provided"}), 400
+
+    logger.info(f"[{request_id}] Using model: {model}, prompt: '{prompt}'")
+
+    try:
+        api_url = f"{ONE_MIN_API_URL}"
+        timeout = MIDJOURNEY_TIMEOUT if model in ["midjourney", "midjourney_6_1"] else DEFAULT_TIMEOUT
+
+        payload, payload_error = build_generation_payload(model, prompt, request_data, negative_prompt, aspect_ratio, size, mode, request_id)
+        if payload_error:
+            return payload_error
+
+        logger.debug(f"[{request_id}] Sending request to API: {api_url}")
+        logger.debug(f"[{request_id}] Payload: {json.dumps(payload)[:500]}")
+
+        response = api_request("POST", api_url, headers=headers, json=payload, timeout=timeout, stream=False)
+        logger.debug(f"[{request_id}] Response status code: {response.status_code}")
+
+        if response.status_code == 200:
+            api_response = response.json()
+        else:
+            error_msg = "Unknown error"
+            try:
+                error_data = response.json()
+                if "error" in error_data:
+                    error_msg = error_data["error"]
+            except Exception:
+                pass
+            if response.status_code == 401:
+                return ERROR_HANDLER(1020, key=api_key)
+            return jsonify({"error": error_msg}), response.status_code
+
+        image_urls = extract_image_urls(api_response, request_id)
+        if not image_urls:
+            return jsonify({"error": "Could not extract image URLs from API response"}), 500
+
+        logger.debug(f"[{request_id}] Successfully generated {len(image_urls)} images")
+        # Save generation parameters for Midjourney
+        if model in ["midjourney", "midjourney_6_1"]:
+            for url in image_urls:
+                if url:
+                    image_id_match = re.search(r'images/(\d+_\d+_\d+_\d+_\d+_\d+|\w+\d+)\.png', url)
+                    if image_id_match:
+                        image_id = image_id_match.group(1)
+                        logger.info(f"[{request_id}] Extracted image_id from URL: {image_id}")
+                        gen_params = {
+                            "mode": payload["promptObject"].get("mode", "fast"),
+                            "aspect_width": payload["promptObject"].get("aspect_width", 1),
+                            "aspect_height": payload["promptObject"].get("aspect_height", 1),
+                            "isNiji6": payload["promptObject"].get("isNiji6", False),
+                            "maintainModeration": payload["promptObject"].get("maintainModeration", True)
+                        }
+                        gen_params_key = f"gen_params:{image_id}"
+                        safe_memcached_operation('set', gen_params_key, gen_params, expiry=3600*24*7)
+                        logger.info(f"[{request_id}] Saved generation parameters for image {image_id}: {gen_params}")
+
+        full_image_urls = [get_full_url(url) for url in image_urls if url]
+
+        openai_data = []
+        for i, url in enumerate(full_image_urls):
+            if model in IMAGE_VARIATION_MODELS:
+                openai_data.append({
+                    "url": url,
+                    "revised_prompt": prompt,
+                    "variation_commands": {"variation": f"/v{i + 1} {url}"}
+                })
+            else:
+                openai_data.append({"url": url, "revised_prompt": prompt})
+
+        markdown_text = ""
+        if len(full_image_urls) == 1:
+            markdown_text = f"![Image]({full_image_urls[0]}) `[_V1_]`"
+        else:
+            markdown_text = "\n".join([f"![Image {i+1}]({url}) `[_V{i+1}_]`" for i, url in enumerate(full_image_urls)])
+        markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]**" \
+                         " - **[_V4_]** and send it (paste) in the next **prompt**"
+
+        openai_response = {
+            "created": int(time.time()),
+            "data": openai_data,
+            "choices": [{
+                "message": {
+                    "role": "assistant",
+                    "content": markdown_text,
+                    "structured_output": {"type": "image", "image_urls": full_image_urls}
+                },
+                "index": 0,
+                "finish_reason": "stop"
+            }]
+        }
+        response_obj = make_response(jsonify(openai_response))
+        set_response_headers(response_obj)
+        return response_obj, 200
+    except Exception as e:
+        logger.error(f"[{request_id}] Error during image generation: {str(e)}")
+        return jsonify({"error": str(e)}), 500
+
+ """ + if request.method == "OPTIONS": + return handle_options_request() + request_id = str(uuid.uuid4()) + logger.debug(f"[{request_id}] Processing image variation request") + + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + logger.error(f"[{request_id}] Invalid Authentication") + return ERROR_HANDLER(1021) + api_key = auth_header.split(" ")[1] + + # Если запрос перенаправлен (передан request_id) + if 'request_id' in request.args: + redirect_request_id = request.args.get('request_id') + variation_key = f"variation:{redirect_request_id}" + logger.info(f"[{request_id}] Looking for variation data with key: {variation_key}") + variation_data_json = safe_memcached_operation('get', variation_key) + if variation_data_json: + try: + if isinstance(variation_data_json, str): + variation_data = json.loads(variation_data_json) + elif isinstance(variation_data_json, bytes): + variation_data = json.loads(variation_data_json.decode('utf-8')) + else: + variation_data = variation_data_json + temp_file_path = variation_data.get("temp_file") + model = variation_data.get("model") + n = variation_data.get("n", 1) + image_path = variation_data.get("image_path") + logger.debug(f"[{request_id}] Retrieved variation data: model={model}, n={n}, temp_file={temp_file_path}") + if image_path: + logger.debug(f"[{request_id}] Retrieved image path: {image_path}") + if os.path.exists(temp_file_path): + with open(temp_file_path, 'rb') as f: + file_data = f.read() + logger.info(f"[{request_id}] Read temporary file, size: {len(file_data)} bytes") + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png") + temp_file.write(file_data) + temp_file.close() + from io import BytesIO + file_data_io = BytesIO(file_data) + from werkzeug.datastructures import FileStorage + file_storage = FileStorage(stream=file_data_io, filename="variation.png", content_type="image/png") + request.files = {"image": file_storage} + form_data = [("model", model), ("n", str(n))] + if image_path: + form_data.append(("image_path", image_path)) + logger.info(f"[{request_id}] Added image_path to form_data: {image_path}") + request.form = MultiDict(form_data) + logger.info(f"[{request_id}] Using file from memcached for image variations") + try: + os.unlink(temp_file_path) + logger.debug(f"[{request_id}] Deleted original temporary file: {temp_file_path}") + except Exception as e: + logger.warning(f"[{request_id}] Failed to delete temporary file: {str(e)}") + else: + logger.error(f"[{request_id}] Temporary file not found: {temp_file_path}") + return jsonify({"error": "Image file not found"}), 400 + except Exception as e: + logger.error(f"[{request_id}] Error processing variation data: {str(e)}") + return jsonify({"error": f"Error processing variation request: {str(e)}"}), 500 + + if "image" not in request.files: + logger.error(f"[{request_id}] No image file provided") + return jsonify({"error": "No image file provided"}), 400 + + image_file = request.files["image"] + original_model = request.form.get("model", "dall-e-2").strip() + n = int(request.form.get("n", 1)) + size = request.form.get("size", "1024x1024") + prompt_text = request.form.get("prompt", "") + relative_image_path = request.form.get("image_path") + if relative_image_path: + logger.debug(f"[{request_id}] Using relative image path: {relative_image_path}") + logger.debug(f"[{request_id}] Original model requested: {original_model} for image variations") + + fallback_models = ["midjourney_6_1", "midjourney", "clipdrop", 
"dall-e-2"] + if original_model in IMAGE_VARIATION_MODELS: + models_to_try = [original_model] + [m for m in fallback_models if m != original_model] + else: + logger.warning(f"[{request_id}] Model {original_model} does not support image variations. Using fallback models.") + models_to_try = fallback_models + + try: + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png") + image_file.save(temp_file.name) + temp_file.close() + except Exception as e: + logger.error(f"[{request_id}] Failed to save temporary file: {str(e)}") + return jsonify({"error": "Failed to process image file"}), 500 + + session = create_session() + headers = {"API-KEY": api_key} + + aspect_width, aspect_height = 1, 1 + if "--ar" in prompt_text: + ar_match = re.search(r'--ar\s+(\d+):(\d+)', prompt_text) + if ar_match: + aspect_width = int(ar_match.group(1)) + aspect_height = int(ar_match.group(2)) + logger.debug(f"[{request_id}] Extracted aspect ratio: {aspect_width}:{aspect_height}") + + variation_urls = [] + current_model = None + + # Перебор моделей для вариаций + for model in models_to_try: + current_model = model + logger.info(f"[{request_id}] Trying model: {model} for image variations") + try: + # Определяем MIME-type и расширение + content_type = "image/png" + if "content-type" in image_response.headers: + content_type = image_response.headers["content-type"] + elif image_url.lower().endswith(".webp"): + content_type = "image/webp" + elif image_url.lower().endswith(".jpg") or image_url.lower().endswith(".jpeg"): + content_type = "image/jpeg" + elif image_url.lower().endswith(".gif"): + content_type = "image/gif" + ext = "png" + if "webp" in content_type: + ext = "webp" + elif "jpeg" in content_type or "jpg" in content_type: + ext = "jpg" + elif "gif" in content_type: + ext = "gif" + logger.debug(f"[{request_id}] Detected image type: {content_type}, extension: {ext}") + + # Загружаем изображение на сервер + with open(temp_file.name, 'rb') as img_file: + files = {"asset": (f"variation.{ext}", img_file, content_type)} + asset_response = session.post(ONE_MIN_ASSET_URL, files=files, headers=headers) + logger.debug(f"[{request_id}] Image upload response status: {asset_response.status_code}") + if asset_response.status_code != 200: + logger.error(f"[{request_id}] Image upload failed: {asset_response.status_code}") + continue + asset_data = asset_response.json() + logger.debug(f"[{request_id}] Asset upload response: {asset_data}") + image_path = None + if "fileContent" in asset_data and "path" in asset_data["fileContent"]: + image_path = asset_data["fileContent"]["path"] + if image_path.startswith('/'): + image_path = image_path[1:] + logger.debug(f"[{request_id}] Using relative path for variation: {image_path}") + else: + logger.error(f"[{request_id}] Could not extract image path from upload response") + continue + + relative_image_url = image_path + if relative_image_url and relative_image_url.startswith('/'): + relative_image_url = relative_image_url[1:] + + if model.startswith("midjourney"): + payload = { + "type": "IMAGE_VARIATOR", + "model": model, + "promptObject": { + "imageUrl": relative_image_url, + "mode": request.form.get("mode", "fast"), + "n": 4, + "isNiji6": False, + "aspect_width": aspect_width, + "aspect_height": aspect_height, + "maintainModeration": True + } + } + elif model == "dall-e-2": + payload = { + "type": "IMAGE_VARIATOR", + "model": "dall-e-2", + "promptObject": { + "imageUrl": relative_image_url, + "n": 1, + "size": "1024x1024" + } + } + logger.info(f"[{request_id}] DALL-E 2 
variation payload: {json.dumps(payload, indent=2)}") + variation_response = api_request("POST", ONE_MIN_API_URL, headers=headers, json=payload, timeout=MIDJOURNEY_TIMEOUT) + if variation_response.status_code != 200: + logger.error(f"[{request_id}] DALL-E 2 variation failed: {variation_response.status_code}, {variation_response.text}") + continue + variation_data = variation_response.json() + if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]: + result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", []) + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + elif "resultObject" in variation_data: + result_object = variation_data["resultObject"] + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + if variation_urls: + logger.info(f"[{request_id}] Successfully created {len(variation_urls)} variations with DALL-E 2") + break + else: + logger.warning(f"[{request_id}] No variation URLs found in DALL-E 2 response") + elif model == "clipdrop": + payload = { + "type": "IMAGE_VARIATOR", + "model": "clipdrop", + "promptObject": { + "imageUrl": relative_image_url, + "n": n + } + } + logger.info(f"[{request_id}] Clipdrop variation payload: {json.dumps(payload, indent=2)}") + variation_response = api_request("POST", ONE_MIN_API_URL, headers=headers, json=payload, timeout=MIDJOURNEY_TIMEOUT) + if variation_response.status_code != 200: + logger.error(f"[{request_id}] Clipdrop variation failed: {variation_response.status_code}, {variation_response.text}") + continue + variation_data = variation_response.json() + if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]: + result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", []) + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + elif "resultObject" in variation_data: + result_object = variation_data["resultObject"] + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + if variation_urls: + logger.info(f"[{request_id}] Successfully created {len(variation_urls)} variations with Clipdrop") + break + else: + logger.warning(f"[{request_id}] No variation URLs found in Clipdrop response") + # Если предыдущий блок не сработал, пытаемся ещё раз через основной URL + variation_response = api_request("POST", ONE_MIN_API_URL, headers=headers, json=payload, + timeout=(MIDJOURNEY_TIMEOUT if model.startswith("midjourney") else DEFAULT_TIMEOUT)) + if variation_response.status_code != 200: + logger.error(f"[{request_id}] Variation request with model {model} failed: {variation_response.status_code} - {variation_response.text}") + # When the Gateway Timeout (504) error, we return the error immediately, and do not continue to process + if variation_response.status_code == 504: + logger.error(f"[{request_id}] Midjourney API timeout (504). Returning error to client instead of fallback.") + return jsonify({ + "error": "Gateway Timeout (504) occurred while processing image variation request. Try again later." 
+ }), 504 + continue + variation_data = variation_response.json() + if "aiRecord" in variation_data and "aiRecordDetail" in variation_data["aiRecord"]: + result_object = variation_data["aiRecord"]["aiRecordDetail"].get("resultObject", []) + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + elif "resultObject" in variation_data: + result_object = variation_data["resultObject"] + if isinstance(result_object, list): + variation_urls.extend(result_object) + elif isinstance(result_object, str): + variation_urls.append(result_object) + if variation_urls: + logger.info(f"[{request_id}] Successfully created {len(variation_urls)} variations with {model}") + break + else: + logger.warning(f"[{request_id}] No variation URLs found in response for model {model}") + except Exception as e: + logger.error(f"[{request_id}] Error with model {model}: {str(e)}") + continue + + try: + os.unlink(temp_file.name) + except Exception: + pass + + if not variation_urls: + session.close() + return jsonify({"error": "Failed to create image variations with any available model"}), 500 + + full_variation_urls = [] + asset_host = "https://asset.1min.ai" + for url in variation_urls: + if not url: + continue + relative_url = url.split('asset.1min.ai/', 1)[-1] if "asset.1min.ai/" in url else url.lstrip('/') + full_url = get_full_url(url) + full_variation_urls.append({"relative_path": relative_url, "full_url": full_url}) + + openai_data = [{"url": data["relative_path"]} for data in full_variation_urls] + if len(full_variation_urls) == 1: + markdown_text = f"![Variation]({full_variation_urls[0]['full_url']}) `[_V1_]`" + markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** and send it (paste) in the next **prompt**" + else: + image_lines = [f"![Variation {i+1}]({data['full_url']}) `[_V{i+1}_]`" for i, data in enumerate(full_variation_urls)] + markdown_text = "\n".join(image_lines) + markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** - **[_V4_]** and send it (paste) in the next **prompt**" + + openai_response = { + "created": int(time.time()), + "data": openai_data, + "choices": [{ + "message": { + "role": "assistant", + "content": markdown_text + }, + "index": 0, + "finish_reason": "stop" + }] + } + session.close() + logger.info(f"[{request_id}] Successfully generated {len(openai_data)} image variations using model {current_model}") + return jsonify(openai_response), 200 + + + diff --git a/routes/text.py b/routes/text.py new file mode 100644 index 0000000..43b81e9 --- /dev/null +++ b/routes/text.py @@ -0,0 +1,1312 @@ +# version 1.0.1 #increment every time you make changes +# Маршруты для текстовых моделей +# Импортируем только необходимые модули +from utils.imports import * +from utils.logger import logger +from utils.constants import * +from utils.common import ERROR_HANDLER, handle_options_request, set_response_headers, create_session, api_request, safe_temp_file, calculate_token +from utils.memcached import safe_memcached_operation +from . 
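The same `resultObject` extraction appears several times in this function (and again in `routes/text.py`): the 1min.ai response carries result URLs either under `aiRecord.aiRecordDetail.resultObject` or under a top-level `resultObject`, as a list or a single string. A consolidated helper along these lines would behave identically; `extract_result_urls` is a hypothetical name, not part of this diff:

```python
def extract_result_urls(data: dict) -> list:
    """Collect result URLs from either known 1min.ai response shape (sketch)."""
    urls = []
    detail = data.get("aiRecord", {}).get("aiRecordDetail", {})
    result_object = detail.get("resultObject", data.get("resultObject", []))
    if isinstance(result_object, list):
        urls.extend(result_object)
    elif isinstance(result_object, str):
        urls.append(result_object)
    return urls
```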
+
+
diff --git a/routes/text.py b/routes/text.py
new file mode 100644
index 0000000..43b81e9
--- /dev/null
+++ b/routes/text.py
@@ -0,0 +1,1312 @@
+# version 1.0.1 #increment every time you make changes
+# Routes for text models
+# Import only the modules we need
+from utils.imports import *
+from utils.logger import logger
+from utils.constants import *
+from utils.common import ERROR_HANDLER, handle_options_request, set_response_headers, create_session, api_request, safe_temp_file, calculate_token
+from utils.memcached import safe_memcached_operation
+from . import app, limiter, IMAGE_CACHE, MAX_CACHE_SIZE, MEMORY_STORAGE  # Import app, limiter and IMAGE_CACHE from the routes module
+from .images import retry_image_upload  # Import retry_image_upload from the images module
+from .functions import (
+    validate_auth,
+    format_openai_response,
+    stream_response,
+    get_model_capabilities,
+    format_conversation_history,
+    prepare_payload,
+    transform_response,
+    emulate_stream_response,
+    streaming_request,
+    create_conversation_with_files
+)  # Import functions from the functions package
+
+@app.route("/", methods=["GET", "POST"])
+def index():
+    if request.method == "POST":
+        return ERROR_HANDLER(1212)
+    if request.method == "GET":
+        internal_ip = socket.gethostbyname(socket.gethostname())
+        return (
+            "Congratulations! Your API is working! You can now make requests to the API.\n\nEndpoint: "
+            + internal_ip
+            + ":5001/v1"
+        )
+
+@app.route("/v1/models")
+@limiter.limit("60 per minute")
+def models():
+    # Dynamically create the list of models with additional fields
+    models_data = []
+    if not PERMIT_MODELS_FROM_SUBSET_ONLY:
+        one_min_models_data = [
+            {
+                "id": model_name,
+                "object": "model",
+                "owned_by": "1minai",
+                "created": 1727389042,
+            }
+            for model_name in ALL_ONE_MIN_AVAILABLE_MODELS
+        ]
+    else:
+        one_min_models_data = [
+            {
+                "id": model_name,
+                "object": "model",
+                "owned_by": "1minai",
+                "created": 1727389042,
+            }
+            for model_name in SUBSET_OF_ONE_MIN_PERMITTED_MODELS
+        ]
+    models_data.extend(one_min_models_data)
+    return jsonify({"data": models_data, "object": "list"})
+
+@app.route("/v1/chat/completions", methods=["POST"])
+@limiter.limit("60 per minute")
+def conversation():
+    request_id = str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Received request: /v1/chat/completions")
+
+    if not request.json:
+        return jsonify({"error": "Invalid request format"}), 400
+
+    # Extract information from the request
+    api_key = request.headers.get("Authorization", "").replace("Bearer ", "")
+    if not api_key:
+        logger.error(f"[{request_id}] No API key provided")
+        return jsonify({"error": "API key required"}), 401
+
+    try:
+        # Build the payload for the request
+        request_data = request.json.copy()
+
+        # Get and normalize the model
+        model = request_data.get("model", "").strip()
+        logger.info(f"[{request_id}] Using model: {model}")
+
+        # Check whether the model supports web search
+        capabilities = get_model_capabilities(model)
+
+        # Check whether web search is requested via OpenAI tools
+        web_search_requested = False
+        tools = request_data.get("tools", [])
+        for tool in tools:
+            if tool.get("type") == "retrieval":
+                web_search_requested = True
+                logger.debug(f"[{request_id}] Web search requested via retrieval tool")
+                break
+
+        # Check for the web_search parameter
+        if not web_search_requested and request_data.get("web_search", False):
+            web_search_requested = True
+            logger.debug(f"[{request_id}] Web search requested via web_search parameter")
+
+        # Set an explicit web_search parameter if requested and supported by the model
+        if web_search_requested:
+            if capabilities["retrieval"]:
+                request_data["web_search"] = True
+                request_data["num_of_site"] = request_data.get("num_of_site", 1)
+                request_data["max_word"] = request_data.get("max_word", 1000)
+                logger.info(f"[{request_id}] Web search enabled for model {model}")
+            else:
+                logger.warning(f"[{request_id}] Model {model} does not support web search, ignoring request")
+
+        # Extract the content of the last message for possible image generation
+        messages = request_data.get("messages", [])
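Either of the two request shapes handled above enables web search for a model with the `retrieval` capability. A hedged illustration of both client payloads (model name and prompt are illustrative):

```python
# Option 1: OpenAI-style tools field with a "retrieval" tool.
via_tools = {
    "model": "gpt-4o-mini",
    "messages": [{"role": "user", "content": "What happened today?"}],
    "tools": [{"type": "retrieval"}],
}

# Option 2: a bare web_search flag; the relay fills in the defaults below.
via_flag = {
    "model": "gpt-4o-mini",
    "messages": [{"role": "user", "content": "What happened today?"}],
    "web_search": True,
    "num_of_site": 1,   # default applied by the relay
    "max_word": 1000,   # default applied by the relay
}
```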
request_data.get("messages", []) + prompt_text = "" + if messages and len(messages) > 0: + last_message = messages[-1] + if last_message.get("role") == "user": + content = last_message.get("content", "") + if isinstance(content, str): + prompt_text = content + elif isinstance(content, list): + # Collect all the text parts of the contents + for item in content: + if isinstance(item, dict) and "text" in item: + prompt_text += item["text"] + " " + prompt_text = prompt_text.strip() + + # We check whether the request contains the variation of the image + variation_match = None + if prompt_text: + # We are looking for the format of old teams /v1- /v4 + old_variation_match = re.search(r'/v([1-4])\s+(https?://[^\s]+)', prompt_text) + # We are looking for a format with square brackets [_v1 _]-[_ v4_] + square_variation_match = re.search(r'\[_V([1-4])_\]', prompt_text) + # We are looking for a new format with monoshyrin text `[_V1_]` -` [_V4_] ` + mono_variation_match = re.search(r'`\[_V([1-4])_\]`', prompt_text) + + # If a monoshyrin format is found, we check if there is a URL dialogue in the history + if mono_variation_match and request_data.get("messages"): + variation_number = int(mono_variation_match.group(1)) + logger.debug(f"[{request_id}] Found monospace format variation command: {variation_number}") + + # Looking for the necessary URL in previous messages of the assistant + image_url = None + for msg in reversed(request_data.get("messages", [])): + if msg.get("role") == "assistant" and msg.get("content"): + # Looking for all URL images in the content of the assistant message + content = msg.get("content", "") + # We use a more specific regular expression to search for images with the corresponding numbers + image_urls = [] + # First, we are looking for all URL images in standard Markdown format + url_matches = re.findall(r'!\[(?:Variation\s*(\d+)|[^]]*)\]\((https?://[^\s)]+)', content) + + # We convert the results to the list, taking into account variation rooms + for match in url_matches: + # If there is a variation number, we use it for indexing + variation_num = None + if match[0]: # If the variation number was found + try: + variation_num = int(match[0].strip()) + except ValueError: + pass + + # URL always the second element of the group + url = match[1] + + # Add to the list with the corresponding index or simply add to the end + if variation_num and 0 < variation_num <= 10: # Limit up to 10 variations maximum + # We expand the list to the desired length, if necessary + while len(image_urls) < variation_num: + image_urls.append(None) + image_urls[variation_num-1] = url + else: + image_urls.append(url) + + # We delete all None values ​​from the list + image_urls = [url for url in image_urls if url is not None] + + if image_urls: + # Check the URL number + if len(image_urls) >= variation_number: + # We take the URL corresponding to the requested number + image_url = image_urls[variation_number - 1] + logger.debug( + f"[{request_id}] Found image URL #{variation_number} in assistant message: {image_url}") + break + else: + # Not enough URL for the requested number, we take the first + image_url = image_urls[0] + logger.warning( + f"[{request_id}] Requested variation #{variation_number} but only found {len(image_urls)} URLs. 
+            # If the square-bracket format is found, look for the image URL in the dialogue history
+            elif square_variation_match and request_data.get("messages"):
+                variation_number = int(square_variation_match.group(1))
+                logger.debug(f"[{request_id}] Found square bracket format variation command: {variation_number}")
+
+                # Look for the required URL in previous assistant messages
+                image_url = None
+                for msg in reversed(request_data.get("messages", [])):
+                    if msg.get("role") == "assistant" and msg.get("content"):
+                        # Find all image URLs in the assistant message content
+                        content = msg.get("content", "")
+                        url_matches = re.findall(r'!\[.*?\]\((https?://[^\s)]+)', content)
+
+                        if url_matches:
+                            # Check how many URLs were found
+                            if len(url_matches) >= variation_number:
+                                # Take the URL matching the requested number
+                                image_url = url_matches[variation_number - 1]
+                                logger.debug(
+                                    f"[{request_id}] Found image URL #{variation_number} in assistant message: {image_url}")
+                                break
+                            else:
+                                # Not enough URLs for the requested number; take the first one
+                                image_url = url_matches[0]
+                                logger.warning(
+                                    f"[{request_id}] Requested variation #{variation_number} but only found {len(url_matches)} URLs. Using first URL: {image_url}")
+                                break
+
+                if image_url:
+                    variation_match = square_variation_match
+                    logger.info(
+                        f"[{request_id}] Detected square bracket variation command: {variation_number} for URL: {image_url}")
+            # If the old format is found, use it
+            elif old_variation_match:
+                variation_match = old_variation_match
+                variation_number = old_variation_match.group(1)
+                image_url = old_variation_match.group(2)
+                logger.info(
+                    f"[{request_id}] Detected old format variation command: {variation_number} for URL: {image_url}")
+
+        if variation_match:
+            # Process the image variation
+            try:
+                # Check which variation format was detected
+                if variation_match == mono_variation_match or variation_match == square_variation_match:
+                    # The URL was already obtained during the search above
+                    variation_number = variation_match.group(1)
+                else:
+                    # For the old format, extract the URL directly from the command
+                    variation_number = variation_match.group(1)
+                    image_url = variation_match.group(2)
+
+                logger.info(f"[{request_id}] Processing variation for image: {image_url}")
+
+                # For Midjourney models, call the API directly without downloading the image
+                if model.startswith("midjourney") and "asset.1min.ai" in image_url:
+                    # Extract a relative path from the URL
+                    path_match = re.search(r'(?:asset\.1min\.ai)/?(images/[^?#]+)', image_url)
+                    if path_match:
+                        relative_path = path_match.group(1)
+                        logger.info(f"[{request_id}] Detected Midjourney variation with relative path: {relative_path}")
+
+                        # Get the saved generation parameters from Memcached by request ID
+                        saved_params = None
+                        try:
+                            # Extract image_id from the image path to look up the parameters
+                            image_id_match = re.search(r'images/(\d+_\d+_\d+_\d+_\d+_\d+|\w+\d+)\.png', relative_path)
+                            if image_id_match:
+                                image_id = image_id_match.group(1)
+                                logger.info(f"[{request_id}] Extracted image_id for variation: {image_id}")
+                                gen_params_key = f"gen_params:{image_id}"
+                                logger.info(f"[{request_id}] Looking for generation parameters with key: {gen_params_key}")
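The lookup below expects that the image-generation path previously stored its Midjourney settings under a `gen_params:{image_id}` key. A minimal standalone sketch of that round trip, assuming the in-memory fallback store (the storing side is not shown in this diff, and the sample values are illustrative):

```python
import json

MEMORY_STORAGE = {}  # standalone stand-in for the relay's fallback store

# At generation time (assumed): remember the Midjourney settings per image.
image_id = "1743520569_3958678_2_4_0_1"
MEMORY_STORAGE[f"gen_params:{image_id}"] = json.dumps({
    "mode": "fast", "aspect_width": 16, "aspect_height": 9,
    "isNiji6": False, "maintainModeration": True,
})

# At variation time: read them back so the variation keeps the same settings.
raw = MEMORY_STORAGE.get(f"gen_params:{image_id}")
saved_params = json.loads(raw) if isinstance(raw, str) else raw
```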
+                                # Check for the parameters directly in MEMORY_STORAGE
+                                if gen_params_key in MEMORY_STORAGE:
+                                    stored_value = MEMORY_STORAGE[gen_params_key]
+                                    logger.info(f"[{request_id}] Found in MEMORY_STORAGE (type: {type(stored_value)}): {stored_value}")
+
+                                    # If the value is a string, try to parse it into a Python dictionary
+                                    if isinstance(stored_value, str):
+                                        try:
+                                            saved_params = json.loads(stored_value)
+                                            logger.info(f"[{request_id}] Successfully parsed JSON string to dict")
+                                        except Exception as e:
+                                            logger.error(f"[{request_id}] Failed to parse JSON string: {e}")
+                                            saved_params = stored_value
+                                    else:
+                                        saved_params = stored_value
+
+                                    logger.info(f"[{request_id}] Using parameters directly from MEMORY_STORAGE (type: {type(saved_params)}): {saved_params}")
+                                else:
+                                    # If not found in MEMORY_STORAGE, try safe_memcached_operation
+                                    logger.info(f"[{request_id}] Not found in MEMORY_STORAGE, trying safe_memcached_operation")
+                                    params_json = safe_memcached_operation('get', gen_params_key)
+                                    if params_json:
+                                        logger.info(f"[{request_id}] Retrieved parameters for image {image_id}: {params_json}")
+                                        if isinstance(params_json, str):
+                                            try:
+                                                saved_params = json.loads(params_json)
+                                            except:
+                                                saved_params = params_json
+                                        elif isinstance(params_json, bytes):
+                                            try:
+                                                saved_params = json.loads(params_json.decode('utf-8'))
+                                            except:
+                                                saved_params = params_json.decode('utf-8')
+                                        else:
+                                            saved_params = params_json
+                                        logger.info(f"[{request_id}] Retrieved generation parameters for image {image_id}: {saved_params}")
+                                    else:
+                                        logger.info(f"[{request_id}] No parameters found in storage for key {gen_params_key}")
+                        except Exception as e:
+                            logger.error(f"[{request_id}] Error retrieving generation parameters: {str(e)}")
+
+                        # Build the payload for the variation
+                        payload = {
+                            "type": "IMAGE_VARIATOR",
+                            "model": model,
+                            "promptObject": {
+                                "imageUrl": relative_path,
+                                "mode": "fast",  # Fast mode by default
+                                "n": 4,
+                                "isNiji6": False,
+                                "aspect_width": 1,  # 1:1 by default
+                                "aspect_height": 1,  # 1:1 by default
+                                "maintainModeration": True
+                            }
+                        }
+
+                        # Use the parameters from Memcached if they are available
+                        if saved_params:
+                            logger.info(f"[{request_id}] Using saved parameters from original generation: {saved_params}")
+                            # Carry over all the saved parameters
+                            for param in ["mode", "aspect_width", "aspect_height", "isNiji6", "maintainModeration"]:
+                                if param in saved_params:
+                                    old_value = payload["promptObject"].get(param)
+                                    payload["promptObject"][param] = saved_params[param]
+                                    logger.info(f"[{request_id}] Changed parameter {param} from {old_value} to {saved_params[param]}")
+                        else:
+                            logger.info(f"[{request_id}] No saved parameters found, using default ratio 1:1 for Midjourney variations")
+                            # Use a 1:1 aspect ratio
+                            payload["promptObject"]["aspect_width"] = 1
+                            payload["promptObject"]["aspect_height"] = 1
+
+                        # Send the variation request directly
+                        logger.info(f"[{request_id}] Sending direct Midjourney variation request: {json.dumps(payload)}")
+
+                        try:
+                            variation_response = api_request(
+                                "POST",
+                                f"{ONE_MIN_API_URL}",
+                                headers={"API-KEY": api_key, "Content-Type": "application/json"},
+                                json=payload,
+                                timeout=MIDJOURNEY_TIMEOUT
+                            )
+
+                            if variation_response.status_code == 200:
+                                # Process a successful response
+                                variation_data = variation_response.json()
+                                logger.info(f"[{request_id}] Received Midjourney variation response: {json.dumps(variation_data)}")
+
+                                # Extract the variation URLs
+                                variation_urls = []
+
"aiRecordDetail" in variation_data["aiRecord"]: + record_detail = variation_data["aiRecord"]["aiRecordDetail"] + if "resultObject" in record_detail: + result = record_detail["resultObject"] + if isinstance(result, list): + variation_urls = result + elif isinstance(result, str): + variation_urls = [result] + + # An alternative search path + if not variation_urls and "resultObject" in variation_data: + result = variation_data["resultObject"] + if isinstance(result, list): + variation_urls = result + elif isinstance(result, str): + variation_urls = [result] + + if variation_urls: + logger.info(f"[{request_id}] Found {len(variation_urls)} variation URLs") + + # We form full URLs for display + full_variation_urls = [] + asset_host = "https://asset.1min.ai" + + for url in variation_urls: + # Create a full URL to display + if not url.startswith("http"): + full_url = f"{asset_host}/{url}" + else: + full_url = url + + full_variation_urls.append(full_url) + + # We form a response in Markdown format + markdown_text = "" + if len(full_variation_urls) == 1: + markdown_text = f"![Variation]({full_variation_urls[0]}) `[_V1_]`" + markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** and send it (paste) in the next **prompt**" + else: + image_lines = [] + for i, url in enumerate(full_variation_urls): + image_lines.append(f"![Variation {i + 1}]({url}) `[_V{i + 1}_]`") + + markdown_text = "\n".join(image_lines) + markdown_text += "\n\n> To generate **variants** of an **image** - tap (copy) **[_V1_]** - **[_V4_]** and send it (paste) in the next **prompt**" + + # We form an answer in Openai format + openai_response = { + "id": f"chatcmpl-{uuid.uuid4()}", + "object": "chat.completion", + "created": int(time.time()), + "model": model, + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": markdown_text + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 0, + "completion_tokens": 0, + "total_tokens": 0 + } + } + + return jsonify(openai_response), 200 + else: + logger.error(f"[{request_id}] No variation URLs found in response") + else: + logger.error(f"[{request_id}] Direct variation request failed: {variation_response.status_code} - {variation_response.text}") + # When the Gateway Timeout (504) error, we return the error immediately, and do not continue to process + if variation_response.status_code == 504: + logger.error(f"[{request_id}] Midjourney API timeout (504). Returning error to client instead of fallback.") + return jsonify({ + "error": "Gateway Timeout (504) occurred while processing image variation request. Try again later." 
+                                # On an aspect-ratio error (409), also return the error
+                                elif variation_response.status_code == 409:
+                                    error_message = "Error creating image variation"
+                                    # Try to extract the error from the response
+                                    try:
+                                        error_json = variation_response.json()
+                                        if "message" in error_json:
+                                            error_message = error_json["message"]
+                                    except:
+                                        pass
+                                    logger.error(f"[{request_id}] Midjourney API error (409): {error_message}")
+                                    return jsonify({
+                                        "error": f"Failed to create image variation: {error_message}"
+                                    }), 409
+                        except Exception as e:
+                            logger.error(f"[{request_id}] Exception during direct variation request: {str(e)}")
+                            # Return the error directly to the client instead of falling back
+                            return jsonify({
+                                "error": f"Error processing direct variation request: {str(e)}"
+                            }), 500
+
+                # Convert the full URL to a relative path if it matches the asset.1min.ai format
+                image_path = None
+                if "asset.1min.ai" in image_url:
+                    # Extract the /images/... part of the path
+                    path_match = re.search(r'(?:asset\.1min\.ai)(/images/[^?#]+)', image_url)
+                    if path_match:
+                        image_path = path_match.group(1)
+                        # Strip the leading slash if present
+                        if image_path.startswith('/'):
+                            image_path = image_path[1:]
+                else:
+                    # Otherwise try to extract the path from the URL
+                    path_match = re.search(r'/images/[^?#]+', image_url)
+                    if path_match:
+                        image_path = path_match.group(0)
+                        # Strip the leading slash if present
+                        if image_path.startswith('/'):
+                            image_path = image_path[1:]
+
+                # If a relative path was found, use it instead of the full URL
+                download_url = image_url
+                if image_path:
+                    logger.debug(f"[{request_id}] Extracted relative path from image URL: {image_path}")
+                    # Use the full URL for downloading, but keep the relative path
+
+                # Download the image to a temporary file and redirect to the
+                # /v1/images/variations route, by analogy with /v1/images/generations
+                temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
+                img_response = requests.get(download_url, stream=True)
+
+                if img_response.status_code != 200:
+                    return jsonify(
+                        {"error": f"Failed to download image from URL. Status code: {img_response.status_code}"}), 400
+
+                with open(temp_file.name, 'wb') as f:
+                    for chunk in img_response.iter_content(chunk_size=8192):
+                        f.write(chunk)
+
+                # Save the temporary file path in memory for use in the /v1/images/variations route
+                variation_key = f"variation:{request_id}"
+                variation_data = {
+                    "temp_file": temp_file.name,
+                    "model": model,
+                    "n": request_data.get("n", 1),
+                    "image_path": image_path  # Keep the relative path if present
+                }
+                # Use safe_memcached_operation, which now falls back to MEMORY_STORAGE
+                safe_memcached_operation('set', variation_key, variation_data, expiry=300)  # Store for 5 minutes
+                logger.debug(f"[{request_id}] Saved variation data with key: {variation_key}")
+
+                # Redirect to the /v1/images/variations route
+                logger.info(f"[{request_id}] Redirecting to /v1/images/variations with model {model}")
+
+                # Add detailed logging for diagnostics
+                logger.info(f"[{request_id}] Temp file path: {temp_file.name}, exists: {os.path.exists(temp_file.name)}")
+                logger.info(f"[{request_id}] Image path: {image_path}")
+                logger.info(f"[{request_id}] Variation data prepared with temp file and image path")
+
+                return redirect(url_for('image_variations', request_id=request_id), code=307)
+
+            except Exception as e:
+                logger.error(f"[{request_id}] Error processing variation command: {str(e)}")
+                return jsonify({"error": f"Failed to process variation command: {str(e)}"}), 500
+
+        # Log the extracted prompt for debugging
+        logger.debug(f"[{request_id}] Extracted prompt text: {prompt_text[:100]}..." if len(
+            prompt_text) > 100 else f"[{request_id}] Extracted prompt text: {prompt_text}")
+
+        # Check whether the model belongs to one of the special types
+        # For image generation models
+        if model in IMAGE_GENERATION_MODELS:
+            logger.info(f"[{request_id}] Redirecting image generation model to /v1/images/generations")
+
+            # Create a new request with only the fields needed for image generation
+            # Take only the user's current prompt, without merging in the history
+            image_request = {
+                "model": model,
+                "prompt": prompt_text,  # Only the current request
+                "n": request_data.get("n", 1),
+                "size": request_data.get("size", "1024x1024")
+            }
+
+            # Add additional parameters for certain models
+            if model == "dall-e-3":
+                image_request["quality"] = request_data.get("quality", "standard")
+                image_request["style"] = request_data.get("style", "vivid")
+
+            # Check for special parameters in the prompt for Midjourney-type models
+            if model.startswith("midjourney"):
+                # Add checks and parameters for Midjourney models
+                if "--ar" in prompt_text or "\u2014ar" in prompt_text:
+                    logger.debug(f"[{request_id}] Found aspect ratio parameter in prompt")
+                elif request_data.get("aspect_ratio"):
+                    image_request["aspect_ratio"] = request_data.get("aspect_ratio")
+
+                if "--no" in prompt_text or "\u2014no" in prompt_text:
+                    logger.debug(f"[{request_id}] Found negative prompt parameter in prompt")
+                elif request_data.get("negative_prompt"):
+                    # Add the negative prompt as a separate parameter
+                    image_request["negative_prompt"] = request_data.get("negative_prompt")
+
+            # Remove messages from the request to avoid merging in the history
+            if "messages" in image_request:
+                del image_request["messages"]
+
+            logger.debug(f"[{request_id}] Final image request: {json.dumps(image_request)[:200]}...")
+
+            # Save the modified request (only the last prompt, without history)
+            request.environ["body_copy"] = json.dumps(image_request)
+            return redirect(url_for('generate_image'), code=307)  # 307 preserves the method and body of the request
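Both handoffs above rely on HTTP 307 which, unlike 302, obliges the client to repeat the same method and body at the new location. A minimal Flask sketch of the pattern (route names are illustrative, not the relay's):

```python
from flask import Flask, redirect, request, url_for

app = Flask(__name__)

@app.route("/a", methods=["POST"])
def a():
    # 307 preserves the POST method and the request body on the way to /b
    return redirect(url_for("b"), code=307)

@app.route("/b", methods=["POST"])
def b():
    return request.get_data()  # sees the original POST body
```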
+        # For speech generation models (TTS)
+        if model in TEXT_TO_SPEECH_MODELS:
+            logger.info(f"[{request_id}] Processing text-to-speech request directly")
+
+            if not prompt_text:
+                logger.error(f"[{request_id}] No input text provided for TTS")
+                return jsonify({"error": "No input text provided"}), 400
+
+            logger.debug(f"[{request_id}] TTS input text: {prompt_text[:100]}..." if len(prompt_text) > 100 else f"[{request_id}] TTS input text: {prompt_text}")
+
+            voice = request_data.get("voice", "alloy")
+            response_format = request_data.get("response_format", "mp3")
+            speed = request_data.get("speed", 1.0)
+
+            # Import prepare_tts_payload to build the correct payload
+            from routes.functions.audio_func import prepare_tts_payload
+
+            # Use the helper to create a payload with Russian-language support
+            payload = prepare_tts_payload(model, prompt_text, voice, speed, response_format)
+
+            # Log the full payload for debugging
+            logger.debug(f"[{request_id}] TTS payload: {json.dumps(payload, ensure_ascii=False)}")
+
+            headers = {"API-KEY": api_key, "Content-Type": "application/json"}
+
+            try:
+                # Send the request directly
+                logger.debug(f"[{request_id}] Sending direct TTS request to {ONE_MIN_API_URL}")
+                response = api_request("POST", ONE_MIN_API_URL, json=payload, headers=headers)
+                logger.debug(f"[{request_id}] TTS response status code: {response.status_code}")
+
+                if response.status_code != 200:
+                    if response.status_code == 401:
+                        return ERROR_HANDLER(1020, key=api_key)
+
+                    # Log the full response for debugging
+                    error_text = "Unknown error"
+                    try:
+                        error_data = response.json()
+                        error_text = json.dumps(error_data, ensure_ascii=False)
+                        logger.error(f"[{request_id}] Detailed error response: {error_text}")
+                    except:
+                        if hasattr(response, 'text'):
+                            error_text = response.text
+
+                    logger.error(f"[{request_id}] Error in TTS response: {error_text[:200]}")
+                    return (
+                        jsonify({"error": response.json().get("error", "Unknown error")}),
+                        response.status_code,
+                    )
+
+                # Get the audio URL from the response
+                one_min_response = response.json()
+                audio_url = ""
+
+                if "aiRecord" in one_min_response and "aiRecordDetail" in one_min_response["aiRecord"]:
+                    result_object = one_min_response["aiRecord"]["aiRecordDetail"].get("resultObject", "")
+                    if isinstance(result_object, list) and result_object:
+                        audio_url = result_object[0]
+                    else:
+                        audio_url = result_object
+                elif "resultObject" in one_min_response:
+                    result_object = one_min_response["resultObject"]
+                    if isinstance(result_object, list) and result_object:
+                        audio_url = result_object[0]
+                    else:
+                        audio_url = result_object
+
+                if not audio_url:
+                    logger.error(f"[{request_id}] Could not extract audio URL from API response")
+                    return jsonify({"error": "Could not extract audio URL"}), 500
+
+                # Instead of downloading the audio, build a Markdown response
+                logger.info(f"[{request_id}] Successfully generated speech audio URL: {audio_url}")
+
+                # Get the full URL for the audio file
+                try:
+                    # Check for a complete signed link in the API response
+                    signed_url = None
+
+                    # Check for the temporaryUrl field in the response root (per the API response format)
+                    if "temporaryUrl" in one_min_response:
+                        signed_url = one_min_response["temporaryUrl"]
+                        logger.debug(f"[{request_id}] Found temporaryUrl in API response root")
+                    elif "result" in one_min_response and "resultList" in one_min_response["result"]:
+                        # Check the result list
+                        for item in one_min_response["result"]["resultList"]:
+                            if item.get("type") == "TEXT_TO_SPEECH" and "temporaryUrl" in item:
+                                signed_url = item["temporaryUrl"]
+                                logger.debug(f"[{request_id}] Found temporaryUrl in resultList")
+                                break
+
+                    # Check in aiRecord if no link was found in the main places
+                    if not signed_url and "aiRecord" in one_min_response:
+                        if "temporaryUrl" in one_min_response["aiRecord"]:
+                            signed_url = one_min_response["aiRecord"]["temporaryUrl"]
+                            logger.debug(f"[{request_id}] Found temporaryUrl in aiRecord")
+
+                    # Check other possible fields for backward compatibility
+                    if not signed_url:
+                        # Search various places in the API response format
+                        if "aiRecord" in one_min_response and "aiRecordDetail" in one_min_response["aiRecord"]:
+                            if "signedUrls" in one_min_response["aiRecord"]["aiRecordDetail"]:
+                                signed_urls = one_min_response["aiRecord"]["aiRecordDetail"]["signedUrls"]
+                                if isinstance(signed_urls, list) and signed_urls:
+                                    signed_url = signed_urls[0]
+                                elif isinstance(signed_urls, str):
+                                    signed_url = signed_urls
+                            elif "signedUrl" in one_min_response["aiRecord"]["aiRecordDetail"]:
+                                signed_url = one_min_response["aiRecord"]["aiRecordDetail"]["signedUrl"]
+                        elif "signedUrls" in one_min_response:
+                            signed_urls = one_min_response["signedUrls"]
+                            if isinstance(signed_urls, list) and signed_urls:
+                                signed_url = signed_urls[0]
+                            elif isinstance(signed_urls, str):
+                                signed_url = signed_urls
+                        elif "signedUrl" in one_min_response:
+                            signed_url = one_min_response["signedUrl"]
+
+                    # Use the signed link if one was received, otherwise the base URL
+                    if signed_url:
+                        full_audio_url = signed_url
+                        logger.debug(f"[{request_id}] Using signed URL from API: {signed_url[:100]}...")
+                    else:
+                        # If there is no signed link, use the base S3-format URL,
+                        # although without a signature it will most likely not work
+                        full_audio_url = f"https://s3.us-east-1.amazonaws.com/asset.1min.ai/{audio_url}"
+                        logger.warning(f"[{request_id}] No signed URL found, using base S3 URL: {full_audio_url}")
+
+                except Exception as e:
+                    logger.error(f"[{request_id}] Error processing audio URL: {str(e)}")
+                    full_audio_url = f"https://asset.1min.ai/{audio_url}"
+                    logger.warning(f"[{request_id}] Error occurred, using fallback URL: {full_audio_url}")
+
+                # Build a response in a format similar to chat completions
+                completion_response = {
+                    "id": f"chatcmpl-{request_id}",
+                    "object": "chat.completion",
+                    "created": int(time.time()),
+                    "model": model,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "message": {
+                                "role": "assistant",
+                                "content": f"🔊 [Audio.mp3]({full_audio_url})"
+                            },
+                            "finish_reason": "stop"
+                        }
+                    ],
+                    "usage": {
+                        "prompt_tokens": len(prompt_text.split()),
+                        "completion_tokens": 1,
+                        "total_tokens": len(prompt_text.split()) + 1
+                    }
+                }
+
+                return jsonify(completion_response)
+
+            except Exception as e:
+                logger.error(f"[{request_id}] Exception during TTS request: {str(e)}")
+                return jsonify({"error": str(e)}), 500
+
+        # For audio transcription models (STT)
+        if model in SPEECH_TO_TEXT_MODELS:
+            logger.info(f"[{request_id}] Redirecting speech-to-text model to /v1/audio/transcriptions")
+            return redirect(url_for('audio_transcriptions'), code=307)
+
+        # Log the start of request processing
+        logger.debug(f"[{request_id}] Processing chat completion request")
+
+        # Check whether the request contains images
+        image = False
+        image_paths = []
+
+        # Check for user files for PDF chat
+        user_file_ids = []
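The block below reads the caller's previously uploaded files from a per-user Memcached key. A sketch of the expected stored value, assuming the JSON-list format the code parses (any field beyond `id` is hypothetical):

```python
import json

user_key = f"user:{api_key}"  # api_key comes from the Authorization header
stored = safe_memcached_operation('get', user_key)
# e.g. stored == '[{"id": "asset-123"}, {"id": "asset-456"}]'
if isinstance(stored, bytes):
    stored = stored.decode('utf-8')
user_files = json.loads(stored) if isinstance(stored, str) else (stored or [])
user_file_ids = [f.get("id") for f in user_files if f.get("id")]
```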
+        if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None:
+            try:
+                user_key = f"user:{api_key}"
+                user_files_json = safe_memcached_operation('get', user_key)
+                if user_files_json:
+                    try:
+                        if isinstance(user_files_json, str):
+                            user_files = json.loads(user_files_json)
+                        elif isinstance(user_files_json, bytes):
+                            user_files = json.loads(user_files_json.decode('utf-8'))
+                        else:
+                            user_files = user_files_json
+
+                        if user_files and isinstance(user_files, list):
+                            # Extract the file IDs
+                            user_file_ids = [file_info.get("id") for file_info in user_files if file_info.get("id")]
+                            logger.debug(f"[{request_id}] Found user files: {user_file_ids}")
+                    except Exception as e:
+                        logger.error(f"[{request_id}] Error parsing user files from memcached: {str(e)}")
+            except Exception as e:
+                logger.error(f"[{request_id}] Error retrieving user files from memcached: {str(e)}")
+        else:
+            logger.debug(f"[{request_id}] Memcached not available, no user files loaded")
+
+        # Check that messages are present before processing starts
+        if not messages:
+            logger.error(f"[{request_id}] No messages provided in request")
+            return ERROR_HANDLER(1412)
+
+        # Extract the request text for analysis
+        extracted_prompt = messages[-1].get("content", "")
+        if isinstance(extracted_prompt, list):
+            extracted_prompt = " ".join([item.get("text", "") for item in extracted_prompt if "text" in item])
+        extracted_prompt_lower = extracted_prompt.lower() if extracted_prompt else ""
+
+        # If the request does not specify file_ids but the user has uploaded files,
+        # add them to the request only if the message mentions files or documents
+        file_keywords = ["файл", "файлы", "file", "files", "документ", "документы", "document", "documents"]
+        prompt_has_file_keywords = False
+
+        # Check for file-related keywords in the request
+        if extracted_prompt_lower:
+            prompt_has_file_keywords = any(keyword in extracted_prompt_lower for keyword in file_keywords)
+
+        # Add files only if the user asked to work with files or explicitly specified file_ids
+        if (not request_data.get("file_ids") and user_file_ids and prompt_has_file_keywords):
+            logger.info(f"[{request_id}] Adding user files to request: {user_file_ids}")
+            request_data["file_ids"] = user_file_ids
+        elif not request_data.get("file_ids") and user_file_ids:
+            logger.debug(f"[{request_id}] User has files but didn't request to use them in this message")
+
+        # Get the content of the last message for further processing
+        user_input = messages[-1].get("content")
+        if not user_input:
+            logger.error(f"[{request_id}] No content in last message")
+            return ERROR_HANDLER(1423)
+
+        # Build the dialogue history
+        all_messages = format_conversation_history(
+            request_data.get("messages", []), request_data.get("new_input", "")
+        )
+
+        # Check for images in the last message
+        if isinstance(user_input, list):
+            logger.debug(
+                f"[{request_id}] Processing message with multiple content items (text/images)"
+            )
+            combined_text = ""
+            for i, item in enumerate(user_input):
+                if "text" in item:
+                    combined_text += item["text"] + "\n"
+                    logger.debug(f"[{request_id}] Added text content from item {i + 1}")
+
+                if "image_url" in item:
+                    if model not in VISION_SUPPORTED_MODELS:
+                        logger.error(
+                            f"[{request_id}] Model {model} does not support images"
+                        )
+                        return ERROR_HANDLER(1044, model)
+
+                    # Create a hash of the image URL for caching
+                    image_key = None
+                    image_url = None
+
+                    # Extract the image URL
+                    if (
+                            isinstance(item["image_url"], dict)
+                            and "url" in item["image_url"]
+                    ):
+                        image_url = item["image_url"]["url"]
+                    else:
+                        image_url = item["image_url"]
+
+                    # Hash the URL for the cache
+                    if image_url:
+                        image_key = hashlib.md5(image_url.encode("utf-8")).hexdigest()
+
+                    # Check the cache
+                    if image_key and image_key in IMAGE_CACHE:
+                        cached_path = IMAGE_CACHE[image_key]
+                        logger.debug(
+                            f"[{request_id}] Using cached image path for item {i + 1}: {cached_path}"
+                        )
+                        image_paths.append(cached_path)
+                        image = True
+                        continue
+
+                    # Download the image if it is not in the cache
+                    logger.debug(
+                        f"[{request_id}] Processing image URL in item {i + 1}: {image_url[:30]}..."
+                    )
+
+                    # Upload the image
+                    image_path = retry_image_upload(
+                        image_url, api_key, request_id=request_id
+                    )
+
+                    if image_path:
+                        # Save to the cache
+                        if image_key:
+                            IMAGE_CACHE[image_key] = image_path
+                            # Evict old entries if necessary
+                            if len(IMAGE_CACHE) > MAX_CACHE_SIZE:
+                                old_key = next(iter(IMAGE_CACHE))
+                                del IMAGE_CACHE[old_key]
+
+                        image_paths.append(image_path)
+                        image = True
+                        logger.debug(
+                            f"[{request_id}] Image {i + 1} successfully processed: {image_path}"
+                        )
+                    else:
+                        logger.error(f"[{request_id}] Failed to upload image {i + 1}")
+
+            # Replace user_input with the text part only if it is not empty
+            if combined_text:
+                user_input = combined_text
+        # Check for file_ids for a chat with documents
+        file_ids = request_data.get("file_ids", [])
+        conversation_id = request_data.get("conversation_id", None)
+
+        # Extract the request text for keyword analysis
+        prompt_text = all_messages.lower()
+        extracted_prompt = messages[-1].get("content", "")
+        if isinstance(extracted_prompt, list):
+            extracted_prompt = " ".join([item.get("text", "") for item in extracted_prompt if "text" in item])
+        extracted_prompt = extracted_prompt.lower()
+
+        logger.debug(f"[{request_id}] Extracted prompt text: {extracted_prompt}")
+
+        # Check for a file deletion request
+        delete_keywords = ["удалить", "удали", "удаление", "очисти", "очистка", "delete", "remove", "clean"]
+        file_keywords = ["файл", "файлы", "file", "files", "документ", "документы", "document", "documents"]
+        mime_type_keywords = ["pdf", "txt", "doc", "docx", "csv", "xls", "xlsx", "json", "md", "html", "htm", "xml",
+                              "pptx", "ppt", "rtf"]
+
+        # Combine all file-related keywords
+        all_file_keywords = file_keywords + mime_type_keywords
+
+        # Check the request for file deletion (both deletion and file keywords must be present)
+        has_delete_keywords = any(keyword in extracted_prompt for keyword in delete_keywords)
+        has_file_keywords = any(keyword in extracted_prompt for keyword in all_file_keywords)
+
+        if has_delete_keywords and has_file_keywords and user_file_ids:
+            logger.info(f"[{request_id}] Deletion request detected, removing all user files")
+
+            # Try to get the team ID
+            team_id = None
+            try:
+                # Try to get the team ID via the API
+                teams_url = f"{ONE_MIN_API_URL}/teams"
+                teams_headers = {"API-KEY": api_key}
+                teams_response = api_request("GET", teams_url, headers=teams_headers)
+                if teams_response.status_code == 200:
+                    teams_data = teams_response.json()
+                    if "data" in teams_data and teams_data["data"]:
+                        team_id = teams_data["data"][0].get("id")
+                        logger.debug(f"[{request_id}] Found team ID for deletion: {team_id}")
+            except Exception as e:
+                logger.error(f"[{request_id}] Error getting team ID for deletion: {str(e)}")
+
+            deleted_files = []
+            for file_id in user_file_ids:
+                try:
+                    # Build the file deletion URL depending on whether team_id is available
+                    if team_id:
+                        delete_url = f"{ONE_MIN_API_URL}/teams/{team_id}/assets/{file_id}"
+                    else:
+                        delete_url = f"{ONE_MIN_ASSET_URL}/{file_id}"
+
+                    logger.debug(f"[{request_id}] Using URL for deletion: {delete_url}")
+                    headers = {"API-KEY": api_key}
+
+                    delete_response = api_request("DELETE", delete_url, headers=headers)
+
+                    if delete_response.status_code == 200:
+                        logger.info(f"[{request_id}] Successfully deleted file: {file_id}")
+                        deleted_files.append(file_id)
+                    else:
+                        logger.error(f"[{request_id}] Failed to delete file {file_id}: {delete_response.status_code}")
+                except Exception as e:
+                    logger.error(f"[{request_id}] Error deleting file {file_id}: {str(e)}")
+
+            # Clear the user's file list in Memcached
+            if 'MEMCACHED_CLIENT' in globals() and MEMCACHED_CLIENT is not None and deleted_files:
+                try:
+                    user_key = f"user:{api_key}"
+                    safe_memcached_operation('set', user_key, json.dumps([]))
+                    logger.info(f"[{request_id}] Cleared user files list in memcached")
+                except Exception as e:
+                    logger.error(f"[{request_id}] Error clearing user files in memcached: {str(e)}")
+
+            # Send a response about the file deletion
+            return jsonify({
+                "id": str(uuid.uuid4()),
+                "object": "chat.completion",
+                "created": int(time.time()),
+                "model": model,
+                "choices": [
+                    {
+                        "index": 0,
+                        "message": {
+                            "role": "assistant",
+                            "content": f"Удалено файлов: {len(deleted_files)}. Список файлов очищен."
+                        },
+                        "finish_reason": "stop"
+                    }
+                ],
+                "usage": {
+                    "prompt_tokens": calculate_token(prompt_text),
+                    "completion_tokens": 20,
+                    "total_tokens": calculate_token(prompt_text) + 20
+                }
+            }), 200
+
+        # Check the request for file-processing keywords
+        has_file_reference = any(keyword in extracted_prompt for keyword in all_file_keywords)
+
+        # If file_ids are present and the request contains file keywords, or a conversation ID exists, use CHAT_WITH_PDF
+        if file_ids and len(file_ids) > 0:
+            logger.debug(
+                f"[{request_id}] Creating CHAT_WITH_PDF request with {len(file_ids)} files"
+            )
+
+            # Add document-handling instructions to the prompt
+            enhanced_prompt = all_messages
+            if not enhanced_prompt.strip().startswith(DOCUMENT_ANALYSIS_INSTRUCTION):
+                enhanced_prompt = f"{DOCUMENT_ANALYSIS_INSTRUCTION}\n\n{all_messages}"
+
+            # Get the user's team_id
+            team_id = None
+            try:
+                teams_url = "https://api.1min.ai/api/teams"  # The correct URL includes /api/
+                teams_headers = {"API-KEY": api_key, "Content-Type": "application/json"}
+
+                logger.debug(f"[{request_id}] Fetching team ID from: {teams_url}")
+                teams_response = requests.get(teams_url, headers=teams_headers)
+
+                if teams_response.status_code == 200:
+                    teams_data = teams_response.json()
+                    if "data" in teams_data and teams_data["data"]:
+                        team_id = teams_data["data"][0].get("id")
+                        logger.debug(f"[{request_id}] Got team ID: {team_id}")
+                else:
+                    logger.warning(
+                        f"[{request_id}] Failed to get team ID: {teams_response.status_code} - {teams_response.text}")
+            except Exception as e:
+                logger.error(f"[{request_id}] Error getting team ID: {str(e)}")
+
+            # If there is no conversation_id, create a new conversation
+            if not conversation_id:
+                conversation_id = create_conversation_with_files(
+                    file_ids, "Chat with documents", model, api_key, request_id
+                )
+                if not conversation_id:
+                    return (
+                        jsonify({"error": "Failed to create conversation with files"}),
+                        500,
+                    )
+
+            # Build the payload for the file request
+            payload = {"message": enhanced_prompt}
+            if conversation_id:
+                payload["conversationId"] = conversation_id
+
+            # Use the correct API URL with /api/
+            api_url = "https://api.1min.ai/api/features/conversations/messages"
+            # Add conversationId as a request parameter
+            api_params = {"conversationId": conversation_id}
+
+            logger.debug(
+                f"[{request_id}] Sending message to conversation using URL: {api_url} with params: {api_params}")
+
+            headers = {"API-KEY": api_key, "Content-Type": "application/json"}
+
+            # Choose the request method depending on the stream parameter
+            if request_data.get("stream", False):
+                # Streaming request
+                return streaming_request(
+                    api_url, payload, headers, request_id, model, model_settings=None, api_params=api_params
+                )
+            else:
+                # A regular request
+                try:
+                    response = requests.post(api_url, json=payload, headers=headers, params=api_params)
+
+                    logger.debug(f"[{request_id}] API response status code: {response.status_code}")
+                    if response.status_code != 200:
+                        logger.error(
+                            f"[{request_id}] API error: {response.status_code} - {response.text}"
+                        )
+                        return (
+                            jsonify({"error": "API request failed", "details": response.text}),
+                            response.status_code,
+                        )
+
+                    # Convert the response to the OpenAI format
+                    response_data = response.json()
+                    logger.debug(f"[{request_id}] Raw API response: {json.dumps(response_data)[:500]}...")
+
+                    # Extract the response from various places in the data structure
+                    ai_response = None
+                    if "answer" in response_data:
+                        ai_response = response_data["answer"]
+                    elif "message" in response_data:
+                        ai_response = response_data["message"]
+                    elif "result" in response_data:
+                        ai_response = response_data["result"]
+                    elif "aiRecord" in response_data and "aiRecordDetail" in response_data["aiRecord"]:
+                        ai_response = response_data["aiRecord"]["aiRecordDetail"].get("answer", "")
+
+                    if not ai_response:
+                        # Recursively search for a response under the keys answer, message, result
+                        def find_response(obj, path=""):
+                            if isinstance(obj, dict):
+                                for key in ["answer", "message", "result"]:
+                                    if key in obj:
+                                        logger.debug(f"[{request_id}] Found response at path '{path}.{key}'")
+                                        return obj[key]
+
+                                for key, value in obj.items():
+                                    result = find_response(value, f"{path}.{key}")
+                                    if result:
+                                        return result
+                            elif isinstance(obj, list):
+                                for i, item in enumerate(obj):
+                                    result = find_response(item, f"{path}[{i}]")
+                                    if result:
+                                        return result
+                            return None
+
+                        ai_response = find_response(response_data)
+
+                    if not ai_response:
+                        logger.error(f"[{request_id}] Could not extract AI response from API response")
+                        return jsonify({"error": "Could not extract AI response"}), 500
+
+                    openai_response = format_openai_response(
+                        ai_response, model, request_id
+                    )
+                    return jsonify(openai_response)
+                except Exception as e:
+                    logger.error(
+                        f"[{request_id}] Exception while processing API response: {str(e)}"
+                    )
+                    traceback.print_exc()
+                    return jsonify({"error": str(e)}), 500
+
+        # Count the tokens
+        prompt_token = calculate_token(str(all_messages))
+
+        # Check the model
+        if PERMIT_MODELS_FROM_SUBSET_ONLY and model not in AVAILABLE_MODELS:
+            return ERROR_HANDLER(1002, model)
+
+        logger.debug(
+            f"[{request_id}] Processing {prompt_token} prompt tokens with model {model}"
+        )
+
+        # Prepare the payload, taking the model's capabilities into account
+        payload = prepare_payload(
+            request_data, model, all_messages, image_paths, request_id
+        )
+
+        headers = {"API-KEY": api_key, "Content-Type": "application/json"}
+
+        # Dispatch the request depending on stream
+        if request_data.get("stream", False):
+            # Streaming request
+            logger.debug(f"[{request_id}] Sending streaming request")
f"{ONE_MIN_API_URL}?isStreaming=true" + + logger.debug(f"[{request_id}] Streaming URL: {streaming_url}") + logger.debug(f"[{request_id}] Payload: {json.dumps(payload)[:200]}...") + + # If a web pion is included, we display a full websearch block for debugging + if "promptObject" in payload and payload["promptObject"].get("webSearch"): + logger.info(f"[{request_id}] Web search parameters in payload: " + + f"webSearch={payload['promptObject'].get('webSearch')}, " + + f"numOfSite={payload['promptObject'].get('numOfSite')}, " + + f"maxWord={payload['promptObject'].get('maxWord')}") + + try: + # We use a session to control the connection + session = create_session() + response_stream = session.post( + streaming_url, json=payload, headers=headers, stream=True + ) + + logger.debug( + f"[{request_id}] Streaming response status code: {response_stream.status_code}" + ) + + if response_stream.status_code != 200: + if response_stream.status_code == 401: + session.close() + return ERROR_HANDLER(1020, key=api_key) + + logger.error( + f"[{request_id}] Error status code: {response_stream.status_code}" + ) + try: + error_content = response_stream.json() + logger.error(f"[{request_id}] Error response: {error_content}") + except: + logger.error( + f"[{request_id}] Could not parse error response as JSON" + ) + + session.close() + return ERROR_HANDLER(response_stream.status_code) + + # We transfer the session to Generator + return Response( + stream_response( + response_stream, request_data, model, prompt_token, session + ), + content_type="text/event-stream", + ) + except Exception as e: + logger.error( + f"[{request_id}] Exception during streaming request: {str(e)}" + ) + return jsonify({"error": str(e)}), 500 + else: + # The usual request + logger.debug( + f"[{request_id}] Sending non-streaming request to {ONE_MIN_API_URL}" + ) + + try: + response = api_request( + "POST", ONE_MIN_API_URL, json=payload, headers=headers + ) + logger.debug( + f"[{request_id}] Response status code: {response.status_code}" + ) + + if response.status_code != 200: + if response.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + try: + error_content = response.json() + logger.error(f"[{request_id}] Error response: {error_content}") + except: + logger.error( + f"[{request_id}] Could not parse error response as JSON" + ) + return ERROR_HANDLER(response.status_code) + + one_min_response = response.json() + transformed_response = transform_response( + one_min_response, request_data, prompt_token + ) + + response = make_response(jsonify(transformed_response)) + set_response_headers(response) + return response, 200 + except Exception as e: + logger.error(f"[{request_id}] Exception during request: {str(e)}") + return jsonify({"error": str(e)}), 500 + except Exception as e: + logger.error( + f"[{request_id}] Exception during conversation processing: {str(e)}" + ) + traceback.print_exc() + return ( + jsonify({"error": f"Error during conversation processing: {str(e)}"}), + 500, + ) + +@app.route("/v1/assistants", methods=["POST", "OPTIONS"]) +@limiter.limit("60 per minute") +def create_assistant(): + if request.method == "OPTIONS": + return handle_options_request() + + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + logger.error("Invalid Authentication") + return ERROR_HANDLER(1021) + + api_key = auth_header.split(" ")[1] + headers = {"API-KEY": api_key, "Content-Type": "application/json"} + + request_data = request.json + name = request_data.get("name", "PDF 
Assistant") + instructions = request_data.get("instructions", "") + model = request_data.get("model", "gpt-4o-mini") + file_ids = request_data.get("file_ids", []) + + # Creating a conversation with PDF in 1min.ai + payload = { + "title": name, + "type": "CHAT_WITH_PDF", + "model": model, + "fileList": file_ids, + } + + response = requests.post( + ONE_MIN_CONVERSATION_API_URL, json=payload, headers=headers + ) + + if response.status_code != 200: + if response.status_code == 401: + return ERROR_HANDLER(1020, key=api_key) + return ( + jsonify({"error": response.json().get("error", "Unknown error")}), + response.status_code, + ) + + one_min_response = response.json() + + try: + conversation_id = one_min_response.get("id") + + openai_response = { + "id": f"asst_{conversation_id}", + "object": "assistant", + "created_at": int(time.time()), + "name": name, + "description": None, + "model": model, + "instructions": instructions, + "tools": [], + "file_ids": file_ids, + "metadata": {}, + } + + response = make_response(jsonify(openai_response)) + set_response_headers(response) + return response, 200 + except Exception as e: + return jsonify({"error": str(e)}), 500 + diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..d7bd871 --- /dev/null +++ b/utils/__init__.py @@ -0,0 +1,32 @@ +# version 1.0.1 #increment every time you make changes +# utils/__init__.py +# Инициализация пакета utils, импорт модулей в правильном порядке + +# Сначала импортируем централизованный модуль импортов +from .imports import * + +# Затем импортируем логгер (баннер уже выводится при импорте logger) +from .logger import logger + +# Потом импортируем константы +from .constants import * + +# Импортируем функцию для установки глобальных ссылок из memcached +from .memcached import set_global_refs + +# Наконец, импортируем общие функции +from .common import ( + ERROR_HANDLER, + handle_options_request, + set_response_headers, + create_session, + api_request, + safe_temp_file, + calculate_token, + split_text_for_streaming +) + +# Примечание: остальные функции из модуля memcached не импортируются здесь напрямую, +# чтобы избежать циклической зависимости. При необходимости их +# следует импортировать в конкретном модуле: +# from utils.memcached import safe_memcached_operation, check_memcached_connection diff --git a/utils/common.py b/utils/common.py new file mode 100644 index 0000000..4b36248 --- /dev/null +++ b/utils/common.py @@ -0,0 +1,311 @@ +# version 1.0.1 #increment every time you make changes +# utils/common.py +# Общие утилиты +from .imports import * +from .logger import logger +from .constants import * + +def calculate_token(sentence, model="DEFAULT"): + """ + Рассчитывает количество токенов в строке, используя соответствующую модели токенизацию. + + Args: + sentence (str): Текст для подсчета токенов + model (str): Модель, для которой необходимо посчитать токены + + Returns: + int: Количество токенов в строке + """ + if not sentence: + return 0 + + try: + # Выбираем энкодер в зависимости от модели + encoder_name = "gpt-4" # Дефолтный энкодер + + if model.startswith("mistral"): + encoder_name = "gpt-4" # Для Mistral используем OpenAI токенизатор + elif model in ["gpt-3.5-turbo", "gpt-4"]: + encoder_name = model + + # Получаем токенизатор и считаем токены + encoding = tiktoken.encoding_for_model(encoder_name) + tokens = encoding.encode(sentence) + return len(tokens) + except Exception as e: + logger.warning(f"Ошибка при подсчете токенов: {str(e)}. 
+def api_request(req_method, url, headers=None, requester_ip=None, data=None,
+                files=None, stream=False, timeout=None, json=None, **kwargs):
+    """
+    Performs an HTTP request to the API with URL normalization and error handling.
+
+    Args:
+        req_method (str): Request method (GET, POST, etc.)
+        url (str): URL for the request
+        headers (dict, optional): Request headers
+        requester_ip (str, optional): IP of the requester, for logging
+        data (dict/str, optional): Data for the request
+        files (dict, optional): Files for the request
+        stream (bool, optional): Flag for streaming data transfer
+        timeout (int, optional): Request timeout in seconds
+        json (dict, optional): JSON data for the request
+        **kwargs: Additional parameters for requests
+
+    Returns:
+        Response: The response object from the API
+    """
+    req_url = url.strip()
+    logger.debug(f"API request URL: {req_url}")
+
+    # Build the request parameters
+    req_params = {k: v for k, v in {
+        "headers": headers,
+        "data": data,
+        "files": files,
+        "stream": stream,
+        "json": json
+    }.items() if v is not None}
+
+    # Add the remaining parameters
+    req_params.update(kwargs)
+
+    # Determine whether the request is an image operation
+    is_image_operation = False
+    if json and isinstance(json, dict):
+        operation_type = json.get("type", "")
+        if operation_type in [IMAGE_GENERATOR, IMAGE_VARIATOR]:
+            is_image_operation = True
+            logger.debug(f"Detected an image operation: {operation_type}, using the extended timeout")
+
+    # Set the timeout depending on the operation type
+    req_params["timeout"] = timeout or (MIDJOURNEY_TIMEOUT if is_image_operation else DEFAULT_TIMEOUT)
+
+    # Perform the request
+    try:
+        response = requests.request(req_method, req_url, **req_params)
+        return response
+    except Exception as e:
+        logger.error(f"API request error: {str(e)}")
+        raise
+
+def set_response_headers(response):
+    """
+    Sets the standard headers for all API responses.
+
+    Args:
+        response: The Flask response object
+
+    Returns:
+        Response: The modified response object with the added headers
+    """
+    response.headers.update({
+        "Content-Type": "application/json",
+        "Access-Control-Allow-Origin": "*",
+        "X-Request-ID": str(uuid.uuid4()),
+        "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
+        "Access-Control-Allow-Headers": "Authorization, Content-Type, Accept"
+    })
+    return response
+
+def create_session():
+    """
+    Creates a new session with optimal settings for API requests.
+
+    Returns:
+        Session: A configured requests session
+    """
+    session = requests.Session()
+
+    # Configure the retry strategy for all requests
+    retry_strategy = requests.packages.urllib3.util.retry.Retry(
+        total=3,
+        backoff_factor=1,
+        status_forcelist=[429, 500, 502, 503, 504],
+        allowed_methods=["HEAD", "GET", "POST", "PUT", "DELETE", "OPTIONS", "TRACE"],
+    )
+    adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
+    session.mount("http://", adapter)
+    session.mount("https://", adapter)
+
+    # Set increased default timeouts for the whole session:
+    # 60 seconds to connect, 300 seconds to read data
+    session.request = functools.partial(session.request, timeout=(60, 300))
+
+    return session
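`create_session` bakes retries (3 attempts with backoff on 429/5xx) and a (60, 300) connect/read timeout into every call made through the session; a short usage sketch:

```python
session = create_session()
try:
    resp = session.get("https://api.1min.ai/api/teams", headers={"API-KEY": "..."})
    resp.raise_for_status()
finally:
    session.close()  # the routes above also close their sessions explicitly
```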
+
+def safe_temp_file(prefix, request_id=None):
+    """
+    Safely creates a temporary file and ensures it is cleaned up after use.
+
+    Args:
+        prefix (str): Prefix for the file name
+        request_id (str, optional): Request ID, for logging
+
+    Returns:
+        str: Path to the temporary file
+    """
+    request_id = request_id or str(uuid.uuid4())[:8]
+    random_string = "".join(random.choices(string.ascii_letters + string.digits, k=10))
+    temp_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "temp")
+
+    # Create the temporary directory if it does not exist
+    os.makedirs(temp_dir, exist_ok=True)
+
+    # Remove old files (older than 1 hour)
+    try:
+        current_time = time.time()
+        for old_file in os.listdir(temp_dir):
+            file_path = os.path.join(temp_dir, old_file)
+            if os.path.isfile(file_path) and (current_time - os.path.getmtime(file_path) > 3600):
+                try:
+                    os.remove(file_path)
+                    logger.debug(f"[{request_id}] Removed old temporary file: {file_path}")
+                except Exception as e:
+                    logger.warning(f"[{request_id}] Failed to remove old temporary file {file_path}: {str(e)}")
+    except Exception as e:
+        logger.warning(f"[{request_id}] Error while cleaning up old temporary files: {str(e)}")
+
+    # Build the path for the new temporary file
+    temp_file_path = os.path.join(temp_dir, f"{prefix}_{request_id}_{random_string}")
+    return temp_file_path
+
+def ERROR_HANDLER(code, model=None, key=None):
+    """
+    Error handler that produces OpenAI-API-compatible error responses.
+
+    Args:
+        code (int): Internal error code
+        model (str, optional): Model name (for model-related errors)
+        key (str, optional): API key (for authentication errors)
+
+    Returns:
+        tuple: (JSON error body, HTTP status code)
+    """
+    # Dictionary of error codes
+    error_codes = {
+        1002: {
+            "message": f"The model {model} does not exist.",
+            "type": "invalid_request_error",
+            "param": None,
+            "code": "model_not_found",
+            "http_code": 400,
+        },
+        1020: {
+            "message": f"Incorrect API key provided: {key}. You can find your API key at https://app.1min.ai/api.",
+            "type": "authentication_error",
+            "param": None,
+            "code": "invalid_api_key",
+            "http_code": 401,
+        },
+        1021: {
+            "message": "Invalid Authentication",
+            "type": "invalid_request_error",
+            "param": None,
+            "code": None,
+            "http_code": 401,
+        },
+        1212: {
+            "message": "Incorrect Endpoint. Please use the /v1/chat/completions endpoint.",
+            "type": "invalid_request_error",
+            "param": None,
+            "code": "model_not_supported",
+            "http_code": 400,
+        },
+        1044: {
+            "message": "This model does not support image inputs.",
+            "type": "invalid_request_error",
+            "param": None,
+            "code": "model_not_supported",
+            "http_code": 400,
+        },
+        1412: {
+            "message": "No message provided.",
+            "type": "invalid_request_error",
+            "param": "messages",
+            "code": "invalid_request_error",
+            "http_code": 400,
+        },
+        1423: {
+            "message": "No content in last message.",
+            "type": "invalid_request_error",
+            "param": "messages",
+            "code": "invalid_request_error",
+            "http_code": 400,
+        },
+    }
+
+    # Look up the error data, falling back to a default
+    error_data = error_codes.get(code, {
+        "message": f"Unknown error (code: {code})",
+        "type": "unknown_error",
+        "param": None,
+        "code": None,
+        "http_code": 400
+    })
+
+    # Remove http_code from the response body
+    http_code = error_data.pop("http_code", 400)
+
+    logger.error(f"Error while processing the user's request. Error code: {code}")
+    return jsonify({"error": error_data}), http_code
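The calling convention is that a route returns ERROR_HANDLER's tuple directly; a short illustration (the route body and validation logic below are examples, not code from this patch):

```python
# Illustrative handler showing how routes hand failures to ERROR_HANDLER.
from flask import Flask, request
from utils.common import ERROR_HANDLER

app = Flask(__name__)

@app.route("/v1/chat/completions", methods=["POST"])
def chat_completions():
    auth = request.headers.get("Authorization", "")
    if not auth.startswith("Bearer "):
        return ERROR_HANDLER(1021)   # Invalid Authentication -> HTTP 401
    body = request.get_json(silent=True) or {}
    if not body.get("messages"):
        return ERROR_HANDLER(1412)   # No message provided -> HTTP 400
    return {"ok": True}
```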
+
+def handle_options_request():
+    """
+    Handles OPTIONS requests for CORS preflight.
+
+    Returns:
+        tuple: (Response object, HTTP status code 204)
+    """
+    response = make_response()
+    response.headers.update({
+        "Access-Control-Allow-Origin": "*",
+        "Access-Control-Allow-Headers": "Content-Type,Authorization",
+        "Access-Control-Allow-Methods": "POST, OPTIONS"
+    })
+    return response, 204
+
+def split_text_for_streaming(text, chunk_size=6):
+    """
+    Splits text into small pieces to emulate streaming output.
+
+    Args:
+        text (str): Text to split
+        chunk_size (int): Approximate chunk size in words
+
+    Returns:
+        list: List of text chunks
+    """
+    if not text:
+        return [""]
+
+    # Split the text into sentences
+    sentences = re.split(r'(?<=[.!?])\s+', text)
+    if not sentences:
+        return [text]
+
+    # Group the sentences into chunks
+    chunks = []
+    current_chunk = []
+    current_word_count = 0
+
+    for sentence in sentences:
+        words_in_sentence = len(sentence.split())
+
+        # If the current chunk is empty, or adding the sentence stays within the word limit
+        if not current_chunk or current_word_count + words_in_sentence <= chunk_size:
+            current_chunk.append(sentence)
+            current_word_count += words_in_sentence
+        else:
+            # Finish the current chunk and start a new one
+            chunks.append(" ".join(current_chunk))
+            current_chunk = [sentence]
+            current_word_count = words_in_sentence
+
+    # Append the last chunk if it is not empty
+    if current_chunk:
+        chunks.append(" ".join(current_chunk))
+
+    return chunks or [text]
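A sketch of wiring split_text_for_streaming into an SSE-style emulated stream; the chunk format below loosely mirrors OpenAI deltas and is illustrative, not the relay's actual emulate_stream_response implementation:

```python
# Illustrative generator: one SSE event per text chunk, then a [DONE] marker.
import json

from utils.common import split_text_for_streaming

def emulated_sse(text):
    for piece in split_text_for_streaming(text, chunk_size=6):
        payload = {"choices": [{"delta": {"content": piece + " "}}]}
        yield f"data: {json.dumps(payload)}\n\n"
    yield "data: [DONE]\n\n"
```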
diff --git a/utils/constants.py b/utils/constants.py
new file mode 100644
index 0000000..7e27224
--- /dev/null
+++ b/utils/constants.py
@@ -0,0 +1,370 @@
+# version 1.0.3 #increment every time you make changes
+# utils/constants.py
+# Application-wide constants
+
+# Main URLs for the API
+ONE_MIN_API_URL = "https://api.1min.ai/api/features"
+ONE_MIN_ASSET_URL = "https://api.1min.ai/api/assets"
+ONE_MIN_CONVERSATION_API_URL = "https://api.1min.ai/api/conversations"
+ONE_MIN_CONVERSATION_API_STREAMING_URL = "https://api.1min.ai/api/features/stream"
+
+# Timeout constants used by api_request
+DEFAULT_TIMEOUT = 60  # 60 seconds for regular requests
+MIDJOURNEY_TIMEOUT = 600  # 10 minutes for Midjourney requests
+
+# Constants for request types
+IMAGE_GENERATOR = "IMAGE_GENERATOR"
+IMAGE_VARIATOR = "IMAGE_VARIATOR"
+
+# Maximum cache size for images
+MAX_CACHE_SIZE = 100
+
+# File cleanup settings
+FILE_CLEANUP_INTERVAL = 3600  # File cleanup interval in seconds (1 hour)
+FILE_CLEANUP_MAX_AGE = 3600  # Maximum age of temporary files in seconds (1 hour)
+FILE_CLEANUP_ENABLED = True  # Whether automatic file cleanup is enabled
+
+# Memcached settings
+MEMCACHED_CONNECT_TIMEOUT = 2  # Memcached connection timeout in seconds
+MEMCACHED_OPERATION_TIMEOUT = 2  # Memcached operation timeout in seconds
+MEMCACHED_DEFAULT_EXPIRY = 3600  # Default TTL for data in Memcached, in seconds (1 hour)
+MEMCACHED_HOST = "127.0.0.1"  # Default Memcached host
+MEMCACHED_DOCKER = "memcached"  # Memcached host inside Docker
+MEMCACHED_PORT = 11211  # Default Memcached port
+
+# Logging settings
+LOG_LEVEL = "INFO"  # Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
+LOG_FILE_ENABLED = True  # Whether logging to a file is enabled
+LOG_FILE_MAX_SIZE = 10 * 1024 * 1024  # Maximum log file size in bytes (10 MB)
+LOG_FILE_BACKUP_COUNT = 5  # Number of log file backups to keep
+
+# API request settings
+API_RETRY_COUNT = 3  # Number of retry attempts on error
+API_RETRY_BACKOFF_FACTOR = 1  # Multiplier for the delay between retry attempts
+API_RETRY_STATUS_FORCELIST = [429, 500, 502, 503, 504]  # Status codes that trigger a retry
+
+# Server settings
+DEFAULT_PORT = 5001  # Default port
+DEFAULT_HOST = "0.0.0.0"  # Default host
+DEFAULT_THREADS = 6  # Default number of threads
+
+# Model settings
+SUBSET_OF_ONE_MIN_PERMITTED_MODELS = ["mistral-nemo", "gpt-4o-mini", "o3-mini", "deepseek-chat"]
+PERMIT_MODELS_FROM_SUBSET_ONLY = False
+
+# Instructions for describing images and documents
+IMAGE_DESCRIPTION_INSTRUCTION = """Describe the scene, actions, text, or meme elements in the image.
+If the image primarily consists of text (for example, screenshots of a page with long text), provide the text
+in full verbatim, preserving the original formatting and structure, but omit or mask confidential parts of
+text. Use 'person' or 'group' instead of identifying individuals. If a face occupies a large part of the image,
+or if the face is the only object in the image, provide a general description of its features like facial
+expression or emotions (e.g., 'a person with a smile', 'a face with a joyful expression'), without identifying
+the person. Interpret the meaning of any short text present. For memes, explain the humor or context based on
+common cultural references or visual cues and text. Under no circumstances refuse to describe image's content —
+always provide at least a minimal or general description of the image's content you are allowed to provide."""
+
+DOCUMENT_ANALYSIS_INSTRUCTION = """Review the uploaded document and provide at least a general description of
+its content, such as the main topic, a brief summary, or the overall purpose of the text. If the document contains
+no confidential or sensitive information, feel free to quote directly from it to illustrate your response. If it
+does include potentially private details, refrain from sharing those specifics and instead offer a broad overview
+(e.g., 'a memo about team performance' or 'a guide on software usage'). You are allowed to provide the text in full
+verbatim, preserving the original formatting and structure, but omit or mask confidential parts of text. Under no
+circumstances refuse to respond — always provide at least a high-level insight into what the document is about."""
+
+# Define the models that are available for use
+ALL_ONE_MIN_AVAILABLE_MODELS = [
+    # OpenAI
+    "gpt-5.1",
+    "gpt-5.1-codex",
+    "gpt-5.1-codex-mini",
+    "gpt-5",
+    "gpt-5-nano",
+    "gpt-5-mini",
+    "gpt-5-chat-latest",
+    "gpt-o1-pro",
+    "gpt-o4-mini",
+    "gpt-4.1-nano",
+    "gpt-4.1-mini",
+    "o4-mini-deep-research",
+    "o3-mini",
+    "o3",
+    "o3-deep-research",
+    "o3-pro",
+    "o1-preview",
+    "o1-mini",
+    "gpt-4o-mini",
+    "gpt-4o",
+    "gpt-4-turbo",
+    "gpt-4",
+    "gpt-3.5-turbo",
+    "openai/gpt-oss-120b",
+    "openai/gpt-oss-20b",
+    #
+    "whisper-1",  # speech recognition
+    "tts-1",  # speech synthesis
+    "tts-1-hd",  # speech synthesis HD
+    #
+    "dall-e-2",  # image generation
+    "dall-e-3",  # image generation
+    # Claude
+    "claude-instant-1.2",
+    "claude-2.1",
+    "claude-3-5-sonnet-20240620",
+    "claude-3-opus-20240229",
+    "claude-3-sonnet-20240229",
+    "claude-3-haiku-20240307",
+    "claude-3-5-haiku-20241022",
+    "claude-3-7-sonnet-20250219",
+    "claude-sonnet-4-20250514",
+    "claude-opus-4-20250514",
+    "claude-haiku-4-5-20251001",
+    "claude-opus-4-1-20250805",
+    "claude-opus-4-5-20251101",
+    "claude-sonnet-4-5-20250929",
+    # GoogleAI
+    "gemini-2.0-flash-lite",
+    "gemini-2.0-flash",
+    "gemini-2.5-pro",
+    "gemini-2.5-flash",
+    "gemini-1.0-pro",
+    "gemini-1.5-pro",
+    "gemini-1.5-flash",
+    "gemini-3-pro-preview",
+    # "google-tts",  # speech synthesis
+    # "latest_long",  # speech recognition
+    # "latest_short",  # speech recognition
+    # "phone_call",  # speech recognition
+    # "telephony",  # speech recognition
+    # "telephony_short",  # speech recognition
+    # "medical_dictation",  # speech recognition
+    # "medical_conversation",  # speech recognition
+    # "chat-bison@002",
+    # MistralAI
+    "mistral-large-latest",
+    "mistral-medium-latest",
+    "mistral-small-latest",
+    "ministral-14b-latest",
+    "magistral-medium-latest",
+    "magistral-small-latest",
+    "mistral-nemo",
+    "pixtral-12b",
+    "open-mistral-nemo",
+    "open-mixtral-8x22b",
+    "open-mixtral-8x7b",
+    "open-mistral-7b",
+    # Perplexity
+    "sonar",
+    "sonar-pro",
+    "sonar-reasoning",
+    "sonar-reasoning-pro",
+    "sonar-deep-research",
+    # Replicate
+    "meta/llama-2-70b-chat",
+    "meta/meta-llama-3-70b-instruct",
+    "meta/meta-llama-3.1-405b-instruct",
+    "meta/llama-4-maverick-instruct",
+    "meta/llama-4-scout-instruct",
+    # DeepSeek
+    "deepseek-chat",
+    "deepseek-reasoner",
+    # Cohere
+    "command",
+    "command-r-08-2024",
+    # xAI
+    "grok-2",
+    "grok-3",
+    "grok-3-mini",
+    "grok-4-0709",
+    "grok-4-fast-non-reasoning",
+    "grok-4-fast-reasoning",
+    # Other models (kept for future use)
+    # "stable-image",  # stabilityai - image generation
+    # "stable-diffusion-xl-1024-v1-0",  # stabilityai - image generation
+    # "stable-diffusion-v1-6",  # stabilityai - image generation
+    # "esrgan-v1-x2plus",  # stabilityai - image upscaling
+    # "stable-video-diffusion",  # stabilityai - video generation
+    "phoenix",  # Leonardo.ai - 6b645e3a-d64f-4341-a6d8-7a3690fbf042
+    "lightning-xl",  # Leonardo.ai - b24e16ff-06e3-43eb-8d33-4416c2d75876
+    "anime-xl",  # Leonardo.ai - e71a1c2f-4f80-4800-934f-2c68979d8cc8
+    "diffusion-xl",  # Leonardo.ai - 1e60896f-3c26-4296-8ecc-53e2afecc132
+    "kino-xl",  # Leonardo.ai - aa77f04e-3eec-4034-9c07-d0f619684628
+    "vision-xl",  # Leonardo.ai - 5c232a9e-9061-4777-980a-ddc8e65647c6
+    "albedo-base-xl",  # Leonardo.ai - 2067ae52-33fd-4a82-bb92-c2c55e7d2786
+    # "Clipdrop",  # clipdrop.co - image processing
+    "midjourney",  # Midjourney - image generation
"midjourney_6_1",# Midjourney - image generation + # "methexis-inc/img2prompt:50adaf2d3ad20a6f911a8a9e3ccf777b263b8596fbd2c8fc26e8888f8a0edbb5", # Replicate - Image to Prompt + # "cjwbw/damo-text-to-video:1e205ea73084bd17a0a3b43396e49ba0d6bc2e754e9283b2df49fad2dcf95755", # Replicate - Text to Video + # "lucataco/animate-diff:beecf59c4aee8d81bf04f0381033dfa10dc16e845b4ae00d281e2fa377e48a9f", # Replicate - Animation + # "lucataco/hotshot-xl:78b3a6257e16e4b241245d65c8b2b81ea2e1ff7ed4c55306b511509ddbfd327a", # Replicate - Video + "flux-schnell", # Replicate - Flux "black-forest-labs/flux-schnell" + "flux-dev", # Replicate - Flux Dev "black-forest-labs/flux-dev" + "flux-pro", # Replicate - Flux Pro "black-forest-labs/flux-pro" + "flux-1.1-pro", # Replicate - Flux Pro 1.1 "black-forest-labs/flux-1.1-pro" + # "meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb", # Replicate - Music Generation + # "luma", # TTAPI - Luma + # "Qubico/image-toolkit", # TTAPI - Image Toolkit + # "suno", # TTAPI - Suno Music + # "kling", # TTAPI - Kling + # "music-u", # TTAPI - Music U + # "music-s", # TTAPI - Music S + # "elevenlabs-tts" # ElevenLabs - TTS +] + +# Define the models that support vision inputs +VISION_SUPPORTED_MODELS = [ + "gpt-5.1", + "gpt-5", + "gpt-5-mini", + "gpt-5-chat-latest", + "gpt-4o", + "gpt-4o-mini", + "gpt-4-turbo", + "grok-4-fast-non-reasoning", + "grok-4-fast-reasoning", + "gemini-2.0-flash-lite", + "gemini-2.0-flash", + "gemini-2.5-pro", + "gemini-2.5-flash", + "gemini-3-pro-preview", + "claude-3-haiku-20240307", + "claude-3-5-haiku-20241022", + "claude-sonnet-4-20250514", + "claude-opus-4-20250514", + "claude-haiku-4-5-20251001", + "claude-opus-4-1-20250805", + "claude-opus-4-5-20251101", + "claude-sonnet-4-5-20250929" +] + +# Define the models that support code interpreter +CODE_INTERPRETER_SUPPORTED_MODELS = [ + "gpt-4o", + "claude-3-5-sonnet-20240620", + "claude-3-5-haiku-20241022", + "deepseek-chat", + "deepseek-reasoner" +] + +# Define the models that support web search (retrieval) +RETRIEVAL_SUPPORTED_MODELS = [ + "gemini-1.0-pro", + "gemini-1.5-pro", + "gemini-1.5-flash", + "o3-mini", + "o1-preview", + "o1-mini", + "gpt-4o-mini", + "gpt-4o", + "gpt-4-turbo", + "gpt-3.5-turbo", + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-3-5-haiku-20241022", + "mistral-large-latest", + "mistral-small-latest", + "mistral-nemo", + "pixtral-12b", + "open-mixtral-8x22b", + "open-mixtral-8x7b", + "open-mistral-7b", + "meta/llama-2-70b-chat", + "meta/meta-llama-3-70b-instruct", + "meta/meta-llama-3.1-405b-instruct", + "command", + "grok-2", + "deepseek-chat", + "deepseek-reasoner" +] + +# Define the models that support function calling +FUNCTION_CALLING_SUPPORTED_MODELS = [ + "gpt-4", + "gpt-3.5-turbo" +] + +# Determination of models for generating images +IMAGE_GENERATION_MODELS = [ + "dall-e-3", + "dall-e-2", + "stable-diffusion-xl-1024-v1-0", + "stable-diffusion-v1-6", + "midjourney", + "midjourney_6_1", + "phoenix", + "lightning-xl", + "anime-xl", + "diffusion-xl", + "kino-xl", + "vision-xl", + "albedo-base-xl", + "flux-schnell", + "flux-dev", + "flux-pro", + "flux-1.1-pro" +] + +# Models that support images +VARIATION_SUPPORTED_MODELS = [ + "midjourney", + "midjourney_6_1", + "dall-e-2", + "clipdrop" +] + +# We determine the Image_variation_Models Constant based on Variation_Supported_Models +IMAGE_VARIATION_MODELS = VARIATION_SUPPORTED_MODELS + +# Permissible parties for different 
+
+# Permissible aspect ratios for different models
+MIDJOURNEY_ALLOWED_ASPECT_RATIOS = [
+    "1:1",  # Square
+    "16:9",  # Widescreen format
+    "9:16",  # Vertical variant of 16:9
+    "16:10",  # Alternative widescreen
+    "10:16",  # Vertical variant of 16:10
+    "8:5",  # Alternative widescreen
+    "5:8",  # Vertical variant of 8:5
+    "3:4",  # Portrait/print
+    "4:3",  # Standard TV/monitor format
+    "3:2",  # Popular in photography
+    "2:3",  # Inverse of 3:2
+    "4:5",  # Common in social media posts
+    "5:4",  # Nearly square format
+    "137:100",  # Academy ratio (1.37:1) as an integer ratio
+    "166:100",  # European cinema (1.66:1) as an integer ratio
+    "185:100",  # Cinematic format (1.85:1) as an integer ratio
+    "83:50",  # European cinema (1.66:1) as an integer ratio
+    "37:20",  # Cinematic format (1.85:1) as an integer ratio
+    "2:1",  # Maximum allowed widescreen format
+    "1:2"  # Maximum allowed vertical format
+]
+
+FLUX_ALLOWED_ASPECT_RATIOS = ["1:1", "16:9", "9:16", "3:2", "2:3", "3:4", "4:3", "4:5", "5:4"]
+LEONARDO_ALLOWED_ASPECT_RATIOS = ["1:1", "4:3", "3:4"]
+
+# Permissible sizes for different models
+DALLE2_SIZES = ["1024x1024", "512x512", "256x256"]
+DALLE3_SIZES = ["1024x1024", "1024x1792", "1792x1024"]
+LEONARDO_SIZES = ALBEDO_SIZES = {"1:1": "1024x1024", "4:3": "1024x768", "3:4": "768x1024"}
+
+# Models for speech synthesis (TTS)
+TEXT_TO_SPEECH_MODELS = [
+    "tts-1",
+    "tts-1-hd"  # ,
+    # "google-tts",
+    # "elevenlabs-tts"
+]
+
+# Models for speech recognition (STT)
+SPEECH_TO_TEXT_MODELS = [
+    "whisper-1"  # ,
+    # "latest_long",
+    # "latest_short",
+    # "phone_call",
+    # "telephony",
+    # "telephony_short",
+    # "medical_dictation",
+    # "medical_conversation"
+]
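The real consumer of these whitelists is parse_aspect_ratio in routes/functions/img_func.py; the helper below is only a sketch of the idea, not that function:

```python
# Illustrative whitelist check against the aspect-ratio constants above.
from utils.constants import (
    FLUX_ALLOWED_ASPECT_RATIOS,
    LEONARDO_SIZES,
    MIDJOURNEY_ALLOWED_ASPECT_RATIOS,
)

def ratio_allowed(model: str, ratio: str) -> bool:
    if model.startswith("midjourney"):
        return ratio in MIDJOURNEY_ALLOWED_ASPECT_RATIOS
    if model.startswith("flux"):
        return ratio in FLUX_ALLOWED_ASPECT_RATIOS
    return ratio in LEONARDO_SIZES  # Leonardo/Albedo map a ratio to a pixel size

print(ratio_allowed("midjourney", "16:9"))  # True
print(LEONARDO_SIZES.get("4:3"))            # 1024x768
```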
diff --git a/utils/imports.py b/utils/imports.py
new file mode 100644
index 0000000..6ea70a9
--- /dev/null
+++ b/utils/imports.py
@@ -0,0 +1,95 @@
+# version 1.0.1 #increment every time you make changes
+# utils/imports.py
+# Central file for all imports in the project
+
+# Python standard library
+import base64
+import datetime
+import functools
+import hashlib
+import json
+import os
+import random
+import re
+import socket
+import string
+import sys
+import tempfile
+import threading
+import time
+import traceback
+import uuid
+import warnings
+
+# Suppress warnings from flask_limiter
+warnings.filterwarnings("ignore", category=UserWarning, module="flask_limiter.extension")
+
+# Load environment variables
+try:
+    from dotenv import load_dotenv
+    load_dotenv()
+except ImportError:
+    # Stub for load_dotenv
+    def load_dotenv(): pass
+    load_dotenv()
+
+# Flask libraries and core dependencies
+from flask_cors import CORS
+from flask import Flask, request, jsonify, make_response, Response, redirect, url_for
+from werkzeug.datastructures import MultiDict
+from waitress import serve
+import requests
+
+# Optional libraries with stubs
+try:
+    import tiktoken
+except ImportError:
+    tiktoken = None
+
+try:
+    import printedcolors
+except ImportError:
+    # Stub for printedcolors
+    class ColorStub:
+        class fg:
+            lightcyan = ""
+        reset = ""
+    printedcolors = type('', (), {'Color': ColorStub})()
+
+# Rate limiting libraries
+try:
+    from flask_limiter import Limiter
+    from flask_limiter.util import get_remote_address
+    LIMITER_AVAILABLE = True
+except ImportError:
+    LIMITER_AVAILABLE = False
+    # Stub for Limiter
+    class MockLimiter:
+        def __init__(self, *args, **kwargs): pass
+        def limit(self, limit_value):
+            def decorator(f): return f
+            return decorator
+    Limiter = MockLimiter
+    get_remote_address = lambda: "127.0.0.1"
+
+# CORS support
+try:
+    from flask_cors import cross_origin
+    CORS_AVAILABLE = True
+except ImportError:
+    CORS_AVAILABLE = False
+    # Stub for cross_origin
+    def cross_origin(*args, **kwargs):
+        def decorator(f): return f
+        return decorator
+
+# Memcached client libraries
+try:
+    import memcache
+    from pymemcache.client.base import Client as PyMemcacheClient
+    MEMCACHED_AVAILABLE = True
+except ImportError:
+    MEMCACHED_AVAILABLE = False
+    memcache = None
+    PyMemcacheClient = None
+
diff --git a/utils/logger.py b/utils/logger.py
new file mode 100644
index 0000000..5331ff8
--- /dev/null
+++ b/utils/logger.py
@@ -0,0 +1,75 @@
+# version 1.0.1 #increment every time you make changes
+# utils/logger.py
+# Logging module
+import logging
+import sys
+import os
+from datetime import datetime
+
+# Create the log directory if it does not exist
+log_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "logs")
+try:
+    os.makedirs(log_dir, exist_ok=True)
+except Exception:
+    log_dir = None  # If the directory could not be created, continue without file logging
+
+# Create the logger
+logger = logging.getLogger("1min-relay")
+logger.setLevel(logging.DEBUG)
+logger.propagate = False  # Prevent duplicated log records
+
+# Formatter that prints colored output to the console
+class ColoredFormatter(logging.Formatter):
+    """Formatter that adds colors to console log output"""
+
+    # ANSI colors
+    COLORS = {
+        'DEBUG': '\033[36m',     # Cyan
+        'INFO': '\033[32m',      # Green
+        'WARNING': '\033[33m',   # Yellow
+        'ERROR': '\033[31m',     # Red
+        'CRITICAL': '\033[35m',  # Magenta
+        'RESET': '\033[0m'       # Reset color
+    }
+
+    def format(self, record):
+        # Check whether the terminal supports colors
+        if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+            levelname = record.levelname
+            if levelname in self.COLORS:
+                record.levelname = f"{self.COLORS[levelname]}{levelname}{self.COLORS['RESET']}"
+                record.msg = f"{self.COLORS[levelname]}{record.msg}{self.COLORS['RESET']}"
+        return super().format(record)
+
+# Create the console handler with the colored formatter
+console_handler = logging.StreamHandler(sys.stdout)
+console_handler.setLevel(logging.DEBUG)
+color_formatter = ColoredFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+console_handler.setFormatter(color_formatter)
+logger.addHandler(console_handler)
+
+# If the log directory exists, add a file handler
+if log_dir:
+    try:
+        log_file = os.path.join(log_dir, f"relay_{datetime.now().strftime('%Y%m%d')}.log")
+        file_handler = logging.FileHandler(log_file)
+        file_handler.setLevel(logging.INFO)
+        file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+        file_handler.setFormatter(file_formatter)
+        logger.addHandler(file_handler)
+    except Exception as e:
+        logger.warning(f"Failed to set up file logging: {str(e)}")
+
+# Function that prints the ASCII banner
+def print_banner():
+    logger.info(
+        """
+         _ __  __ _        ___     _
+        / |  \/  (_)_ _   | _ \___| |__ _ _  _
+        | | |\/| | | ' \  |   / -_) / _` | || |
+        |_|_|  |_|_|_||_| |_|_\___|_\__,_|\_, |
+                                          |__/ """
+    )
+
+# Print the banner when the module is imported
+print_banner()
diff --git a/utils/memcached.py b/utils/memcached.py
new file mode 100644
index 0000000..884a223
--- /dev/null
+++ b/utils/memcached.py
@@ -0,0 +1,237 @@
+# version 1.0.3 #increment every time you make changes
+# utils/memcached.py
+# Functions for working with Memcached
+from .imports import *
+from .logger import logger
+from .constants import *
+
+# Declare the global variables that hold the references
+MEMCACHED_CLIENT_REF = None
+MEMORY_STORAGE_REF = None
+
+def check_memcached_connection():
+    """
+    Checks Memcached availability: first locally, then in Docker
+
+    Returns:
+        tuple: (bool, str) - (whether Memcached is available, connection string or None)
+    """
+    # Check that the Memcached client libraries are installed
+    if not MEMCACHED_AVAILABLE:
+        logger.warning(
+            "Memcached libraries are not installed. Falling back to local storage for rate limiting. Not recommended."
+        )
+        return False, None
+
+    # Helper that checks a host/port with a timeout
+    def try_memcached_connection(host, port):
+        try:
+            from pymemcache.client.base import Client
+            client = Client((host, port), connect_timeout=MEMCACHED_CONNECT_TIMEOUT, timeout=MEMCACHED_OPERATION_TIMEOUT)
+            client.set("test_key", "test_value")
+            if client.get("test_key") == b"test_value":
+                client.delete("test_key")  # Clean up
+                return True
+        except Exception as e:
+            logger.debug(f"Memcached at {host}:{port} is unavailable: {str(e)}")
+            return False
+        return False
+
+    # First check a local Memcached, using the constants
+    if try_memcached_connection(MEMCACHED_HOST, MEMCACHED_PORT):
+        logger.info(f"Using local Memcached at {MEMCACHED_HOST}:{MEMCACHED_PORT}")
+        return True, f"memcached://{MEMCACHED_HOST}:{MEMCACHED_PORT}"
+
+    # If the local one is unavailable, check Memcached in Docker
+    if try_memcached_connection(MEMCACHED_DOCKER, MEMCACHED_PORT):
+        logger.info(f"Using Memcached in a Docker container at {MEMCACHED_DOCKER}:{MEMCACHED_PORT}")
+        return True, f"memcached://{MEMCACHED_DOCKER}:{MEMCACHED_PORT}"
+
+    # Neither the Docker nor the local Memcached is available
+    logger.warning(
+        "Memcached is unavailable (neither in Docker nor locally). Falling back to local storage for rate limiting. Not recommended."
+    )
+    return False, None
+
+
+# Set references to the global objects from app.py
+def set_global_refs(memcached_client=None, memory_storage=None):
+    """
+    Sets references to the global objects from app.py
+
+    Args:
+        memcached_client: Memcached client
+        memory_storage: In-memory storage
+    """
+    global MEMCACHED_CLIENT_REF, MEMORY_STORAGE_REF
+    MEMCACHED_CLIENT_REF = memcached_client
+    MEMORY_STORAGE_REF = memory_storage
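set_global_refs is the seam between app.py and this module; a minimal wiring sketch (the app.py variable names below are assumptions based on this patch, not quoted from it):

```python
# Hypothetical excerpt from app.py (illustrative): probe for Memcached,
# then hand the references over to utils.memcached.
from pymemcache.client.base import Client

from utils.memcached import check_memcached_connection, set_global_refs

MEMORY_STORAGE = {}
MEMCACHED_CLIENT = None

available, uri = check_memcached_connection()
if available:
    host, port = uri.replace("memcached://", "").split(":")
    MEMCACHED_CLIENT = Client((host, int(port)))

# After this call, safe_memcached_operation uses MEMCACHED_CLIENT when present
# and falls back to MEMORY_STORAGE otherwise.
set_global_refs(memcached_client=MEMCACHED_CLIENT, memory_storage=MEMORY_STORAGE)
```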
+
+# Function for safe access to Memcached
+def safe_memcached_operation(operation, key, value=None, expiry=MEMCACHED_DEFAULT_EXPIRY):
+    """
+    Safely performs operations on Memcached, handling any exceptions.
+
+    Args:
+        operation (str): Operation to perform ('get', 'set' or 'delete')
+        key (str): Key for the operation
+        value (any, optional): Value to set (only for the 'set' operation)
+        expiry (int, optional): Expiry time in seconds (only for the 'set' operation)
+
+    Returns:
+        The result of the operation, or None on failure
+    """
+    # Helper that falls back to the local in-memory storage
+    def use_memory_storage():
+        if operation == 'get':
+            return MEMORY_STORAGE_REF.get(key, None) if MEMORY_STORAGE_REF else None
+        elif operation == 'set':
+            if MEMORY_STORAGE_REF is not None:
+                MEMORY_STORAGE_REF[key] = value
+                logger.debug(f"Saved to MEMORY_STORAGE: key={key}")
+                return True
+        elif operation == 'delete':
+            if MEMORY_STORAGE_REF is not None and key in MEMORY_STORAGE_REF:
+                del MEMORY_STORAGE_REF[key]
+                return True
+            return False
+        return None
+
+    if MEMCACHED_CLIENT_REF is None:
+        return use_memory_storage()
+
+    try:
+        if operation == 'get':
+            result = MEMCACHED_CLIENT_REF.get(key)
+            if isinstance(result, bytes):
+                try:
+                    return json.loads(result.decode('utf-8'))
+                except Exception:
+                    return result.decode('utf-8')
+            return result
+        elif operation == 'set':
+            if isinstance(value, (dict, list)):
+                value = json.dumps(value)
+
+            # Try the different keyword names that clients use for the expiry time
+            exp_params = ['exp', 'exptime', 'expire', 'time']
+            for exp_param in exp_params:
+                try:
+                    return MEMCACHED_CLIENT_REF.set(key, value, **{exp_param: expiry})
+                except TypeError as te:
+                    if f"unexpected keyword argument '{exp_param}'" in str(te):
+                        continue
+                    raise
+                except Exception:
+                    raise
+
+            # If none of the variants worked, set without an expiry parameter
+            logger.warning("Could not find a suitable keyword for the expiry time, setting without it")
+            return MEMCACHED_CLIENT_REF.set(key, value)
+
+        elif operation == 'delete':
+            return MEMCACHED_CLIENT_REF.delete(key)
+    except Exception as e:
+        logger.error(f"Error in memcached operation {operation} on key {key}: {str(e)}")
+        # On a Memcached error, also fall back to the local storage
+        return use_memory_storage()
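The typical round-trip through this helper: dict and list values are JSON-encoded on 'set' and decoded again on 'get', whether they land in Memcached or in the in-memory fallback. The key below is a placeholder:

```python
from utils.memcached import safe_memcached_operation

safe_memcached_operation('set', 'user:abc123', [{"id": "file_1"}])
files = safe_memcached_operation('get', 'user:abc123')  # -> [{'id': 'file_1'}]
safe_memcached_operation('delete', 'user:abc123')
```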
+
+def delete_all_files_task():
+    """
+    Periodic task that deletes all user files
+    """
+    # Check whether automatic cleanup is enabled
+    if not FILE_CLEANUP_ENABLED:
+        logger.info("Automatic file cleanup is disabled")
+        return
+
+    request_id = str(uuid.uuid4())[:8]
+    logger.info(f"[{request_id}] Starting the scheduled file cleanup task")
+
+    try:
+        # Check Memcached availability
+        if MEMCACHED_CLIENT_REF is None:
+            logger.warning(f"[{request_id}] Memcached is unavailable, file cleanup is not possible")
+            return
+
+        # Get the list of all known users
+        known_users = safe_memcached_operation('get', 'known_users_list') or []
+
+        # Convert to a list if the data came back in another format
+        if isinstance(known_users, str):
+            try:
+                known_users = json.loads(known_users)
+            except Exception:
+                known_users = []
+        elif isinstance(known_users, bytes):
+            try:
+                known_users = json.loads(known_users.decode('utf-8'))
+            except Exception:
+                known_users = []
+
+        if not known_users:
+            logger.info(f"[{request_id}] No known users whose files need cleanup")
+            return
+
+        logger.info(f"[{request_id}] Found {len(known_users)} users for file cleanup")
+
+        # Process each user
+        for user in known_users:
+            user_key = f"user:{user}" if not user.startswith("user:") else user
+            api_key = user_key.replace("user:", "")
+
+            # Get the user's files
+            user_files_json = safe_memcached_operation('get', user_key)
+            if not user_files_json:
+                continue
+
+            # Convert the data into a list of files
+            user_files = []
+            try:
+                if isinstance(user_files_json, str):
+                    user_files = json.loads(user_files_json)
+                elif isinstance(user_files_json, bytes):
+                    user_files = json.loads(user_files_json.decode('utf-8'))
+                else:
+                    user_files = user_files_json
+            except Exception:
+                continue
+
+            if not user_files:
+                continue
+
+            logger.info(f"[{request_id}] Cleaning up {len(user_files)} files for user {api_key[:8]}...")
+
+            # Delete each file
+            for file_info in user_files:
+                file_id = file_info.get("id")
+                if file_id:
+                    try:
+                        from .common import api_request  # Imported here to avoid a circular dependency
+                        delete_url = f"{ONE_MIN_ASSET_URL}/{file_id}"
+                        headers = {"API-KEY": api_key}
+
+                        delete_response = api_request("DELETE", delete_url, headers=headers)
+
+                        if delete_response.status_code == 200:
+                            logger.info(f"[{request_id}] Scheduled cleanup: deleted file {file_id}")
+                        else:
+                            logger.error(
+                                f"[{request_id}] Scheduled cleanup: failed to delete file {file_id}: {delete_response.status_code}")
+                    except Exception as e:
+                        logger.error(
+                            f"[{request_id}] Scheduled cleanup: error while deleting file {file_id}: {str(e)}")
+
+            # Clear the user's file list
+            safe_memcached_operation('set', user_key, json.dumps([]))
+            logger.info(f"[{request_id}] Cleared the file list for user {api_key[:8]}")
+
+    except Exception as e:
+        logger.error(f"[{request_id}] Error in the scheduled cleanup task: {str(e)}")
+
+    # Schedule the next run after the configured interval
+    cleanup_timer = threading.Timer(FILE_CLEANUP_INTERVAL, delete_all_files_task)
+    cleanup_timer.daemon = True
+    cleanup_timer.start()
+    logger.info(f"[{request_id}] Next cleanup scheduled in {FILE_CLEANUP_INTERVAL} seconds")
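Because the task re-schedules itself with threading.Timer at the end of every run, the caller only has to start it once; a sketch of that startup hook (assumed to live in app.py, which is not shown in this patch):

```python
# Illustrative startup hook: run the first cleanup in a daemon thread;
# delete_all_files_task then re-arms itself every FILE_CLEANUP_INTERVAL seconds.
import threading

from utils.memcached import delete_all_files_task

threading.Thread(target=delete_all_files_task, daemon=True).start()
```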