Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 41 additions & 1 deletion workflows/.env.example
Original file line number Diff line number Diff line change
@@ -1,2 +1,42 @@
# We support all LangChain chat models; OpenAI is configured here only for demo purposes
OPENAI_API_KEY=
LLM_PROVIDER="openai"
MODEL_NAME="gpt-4o"

OPENAI_ENDPOINT=https://api.openai.com/v1
OPENAI_API_KEY=

ANTHROPIC_API_KEY=
ANTHROPIC_ENDPOINT=https://api.anthropic.com

GOOGLE_API_KEY=

AZURE_OPENAI_ENDPOINT=
AZURE_OPENAI_API_KEY=
AZURE_OPENAI_API_VERSION=2025-01-01-preview

DEEPSEEK_ENDPOINT=https://api.deepseek.com
DEEPSEEK_API_KEY=

MISTRAL_API_KEY=
MISTRAL_ENDPOINT=https://api.mistral.ai/v1

OLLAMA_ENDPOINT=http://localhost:11434

ALIBABA_ENDPOINT=https://dashscope.aliyuncs.com/compatible-mode/v1
ALIBABA_API_KEY=

MOONSHOT_ENDPOINT=https://api.moonshot.cn/v1
MOONSHOT_API_KEY=

UNBOUND_ENDPOINT=https://api.getunbound.ai
UNBOUND_API_KEY=

SiliconFLOW_ENDPOINT=https://api.siliconflow.cn/v1/
SiliconFLOW_API_KEY=

IBM_ENDPOINT=https://us-south.ml.cloud.ibm.com
IBM_API_KEY=
IBM_PROJECT_ID=

GROK_ENDPOINT="https://api.x.ai/v1"
GROK_API_KEY=
36 changes: 25 additions & 11 deletions workflows/cli.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,24 @@
from dotenv import load_dotenv
load_dotenv()
import asyncio
import json
import os
import subprocess
import tempfile # For temporary file handling
import webbrowser
from pathlib import Path
import os

import typer
from browser_use.browser.browser import Browser

# Assuming OPENAI_API_KEY is set in the environment
from langchain_openai import ChatOpenAI

from workflow_use.builder.service import BuilderService
from workflow_use.controller.service import WorkflowController
from workflow_use.mcp.service import get_mcp_server
from workflow_use.recorder.service import RecordingService # Added import
from workflow_use.workflow.service import Workflow
from workflow_use.llm.llm_provider import get_llm_model
from workflow_use.llm.config import model_names

# Placeholder for recorder functionality
# from src.recorder.service import RecorderService
Expand All @@ -31,15 +33,27 @@
# Default LLM instance to None
llm_instance = None
try:
llm_instance = ChatOpenAI(model='gpt-4o')
page_extraction_llm = ChatOpenAI(model='gpt-4o-mini')
# Get provider and model name from environment or default to openai
provider = os.getenv("LLM_PROVIDER", "openai").lower()
model_name = os.getenv("MODEL_NAME", "")

# If no model name specified, prompt user
if not model_name:
typer.echo(f"Available models for {provider}:")
typer.echo(f"{model_names[provider]}")

model_name = typer.prompt(f"model name for {provider}, default=", default=model_names[provider][0])
os.environ["MODEL_NAME"] = model_name

# Initialize LLM with selected provider and model
llm_instance = get_llm_model(provider, model_name=model_name)
page_extraction_llm = get_llm_model(provider, model_name=model_name)

except Exception as e:
typer.secho(f'Error initializing LLM: {e}. Would you like to set your OPENAI_API_KEY?', fg=typer.colors.RED)
set_openai_api_key = input('Set OPENAI_API_KEY? (y/n): ')
if set_openai_api_key.lower() == 'y':
os.environ['OPENAI_API_KEY'] = input('Enter your OPENAI_API_KEY: ')
llm_instance = ChatOpenAI(model='gpt-4o')
page_extraction_llm = ChatOpenAI(model='gpt-4o-mini')
typer.secho(
f"Error initializing LLM: {e}. Would you like to set your API key?",
fg=typer.colors.RED,
)

builder_service = BuilderService(llm=llm_instance) if llm_instance else None
# recorder_service = RecorderService() # Placeholder
Expand Down
Binary file added workflows/requirements.txt
Binary file not shown.
87 changes: 87 additions & 0 deletions workflows/workflow_use/llm/config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
# Registry of known chat-model identifiers, keyed by provider id.
# Keys match the LLM_PROVIDER values accepted by the CLI; the first entry in
# each list is used as the interactive default when no MODEL_NAME is set.
model_names: dict[str, list[str]] = {
    # Anthropic Claude family
    "anthropic": [
        "claude-3-5-sonnet-20241022",
        "claude-3-5-sonnet-20240620",
        "claude-3-opus-20240229",
    ],
    # OpenAI hosted models
    "openai": [
        "gpt-4o",
        "gpt-4",
        "gpt-3.5-turbo",
        "o3-mini",
    ],
    # DeepSeek first-party API
    "deepseek": [
        "deepseek-chat",
        "deepseek-reasoner",
    ],
    # Google Gemini (stable + experimental/preview releases)
    "google": [
        "gemini-2.0-flash",
        "gemini-2.0-flash-thinking-exp",
        "gemini-1.5-flash-latest",
        "gemini-1.5-flash-8b-latest",
        "gemini-2.0-flash-thinking-exp-01-21",
        "gemini-2.0-pro-exp-02-05",
        "gemini-2.5-pro-preview-03-25",
        "gemini-2.5-flash-preview-04-17",
    ],
    # Local models served through Ollama (name:tag form)
    "ollama": [
        "qwen2.5:7b",
        "qwen2.5:14b",
        "qwen2.5:32b",
        "qwen2.5-coder:14b",
        "qwen2.5-coder:32b",
        "llama2:7b",
        "deepseek-r1:14b",
        "deepseek-r1:32b",
    ],
    # Azure OpenAI deployments
    "azure_openai": [
        "gpt-4o",
        "gpt-4",
        "gpt-3.5-turbo",
    ],
    # Mistral AI hosted models
    "mistral": [
        "pixtral-large-latest",
        "mistral-large-latest",
        "mistral-small-latest",
        "ministral-8b-latest",
    ],
    # Alibaba DashScope (Qwen) models
    "alibaba": [
        "qwen-plus",
        "qwen-max",
        "qwen-vl-max",
        "qwen-vl-plus",
        "qwen-turbo",
        "qwen-long",
    ],
    # Moonshot (Kimi) vision previews
    "moonshot": [
        "moonshot-v1-32k-vision-preview",
        "moonshot-v1-8k-vision-preview",
    ],
    # Unbound gateway models
    "unbound": [
        "gemini-2.0-flash",
        "gpt-4o-mini",
        "gpt-4o",
        "gpt-4.5-preview",
    ],
    # xAI Grok models
    "grok": [
        "grok-3",
        "grok-3-fast",
        "grok-3-mini",
        "grok-3-mini-fast",
        "grok-2-vision",
        "grok-2-image",
        "grok-2",
    ],
    # SiliconFlow marketplace (org/model form)
    "siliconflow": [
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-V3",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "deepseek-ai/DeepSeek-V2.5",
        "deepseek-ai/deepseek-vl2",
        "Qwen/Qwen2.5-72B-Instruct-128K",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-32B-Instruct",
        "Qwen/Qwen2.5-14B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-Coder-7B-Instruct",
        "Qwen/Qwen2-7B-Instruct",
        "Qwen/Qwen2-1.5B-Instruct",
        "Qwen/QwQ-32B-Preview",
        "Qwen/Qwen2-VL-72B-Instruct",
        "Qwen/Qwen2.5-VL-32B-Instruct",
        "Qwen/Qwen2.5-VL-72B-Instruct",
        "TeleAI/TeleChat2",
        "THUDM/glm-4-9b-chat",
        "Vendor-A/Qwen/Qwen2.5-72B-Instruct",
        "internlm/internlm2_5-7b-chat",
        "internlm/internlm2_5-20b-chat",
        "Pro/Qwen/Qwen2.5-7B-Instruct",
        "Pro/Qwen/Qwen2-7B-Instruct",
        "Pro/Qwen/Qwen2-1.5B-Instruct",
        "Pro/THUDM/chatglm3-6b",
        "Pro/THUDM/glm-4-9b-chat",
    ],
    # IBM watsonx.ai foundation models
    "ibm": [
        "ibm/granite-vision-3.1-2b-preview",
        "meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
        "meta-llama/llama-3-2-90b-vision-instruct",
    ],
    # ModelScope hosted models
    "modelscope": [
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-Coder-14B-Instruct",
        "Qwen/Qwen2.5-Coder-7B-Instruct",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-32B-Instruct",
        "Qwen/Qwen2.5-14B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
        "Qwen/QwQ-32B-Preview",
        "Qwen/Qwen2.5-VL-3B-Instruct",
        "Qwen/Qwen2.5-VL-7B-Instruct",
        "Qwen/Qwen2.5-VL-32B-Instruct",
        "Qwen/Qwen2.5-VL-72B-Instruct",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-V3",
        "Qwen/Qwen3-1.7B",
        "Qwen/Qwen3-4B",
        "Qwen/Qwen3-8B",
        "Qwen/Qwen3-14B",
        "Qwen/Qwen3-30B-A3B",
        "Qwen/Qwen3-32B",
        "Qwen/Qwen3-235B-A22B",
    ],
}
Loading