# SpecialistHub Configuration Example
# Version: 3.0
#
# ==============================================================================
# DEVELOPER-LEVEL CONFIGURATION
# This file is the system's blueprint, intended for developers to define the
# agent's architecture. It should NOT be exposed to or editable by end-users.
# ==============================================================================
#
# This file defines the structure of the agentic system. It is the single
# source of truth for wiring together providers, models, and specialists.
# Copy this file to `config.yaml` and customize it for your setup.
# ==============================================================================
# LLM PROVIDER CONFIGURATION
# ==============================================================================
# Define the available LLM providers. The keys here (e.g., 'local_lmstudio')
# are referenced by the specialists below.
# The application will read environment variables defined in .env for secrets.
#
# Supported providers: "lmstudio", "ollama", "gemini"
# ==============================================================================
llm_providers:
  # A configuration for a local model via LM Studio
  local_lmstudio:
    # Uses the LMSTUDIO_BASE_URL from your .env file
    # Example: LMSTUDIO_BASE_URL=http://localhost:1234/v1
    type: "lmstudio"
    api_identifier: "openai-oss-22b" # The exact model identifier for the server

  # A configuration for a local model via Ollama
  local_ollama:
    # Uses OLLAMA_BASE_URL and OLLAMA_MODEL from your .env file
    # Example: OLLAMA_BASE_URL=http://localhost:11434
    type: "ollama"
    api_identifier: "gemma3:27b" # The model name registered with Ollama

  # A configuration for a powerful, general-purpose Gemini model
  gemini_pro:
    # Uses GEMINI_API_KEY from your .env file
    type: "gemini"
    api_identifier: "gemini-2.5-pro"
    # Future parameters could go here, e.g., temperature: 0.5

  # A configuration for a fast, cheaper Gemini model
  gemini_flash:
    # Uses GEMINI_API_KEY from your .env file
    type: "gemini"
    api_identifier: "gemini-2.5-flash"
# ==============================================================================
# WORKFLOW CONFIGURATION
# ==============================================================================
# Define the entry point for the graph. Defaults to 'router_specialist'.
# ==============================================================================
workflow:
  entry_point: "prompt_triage_specialist"
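  # Commented-out default, shown for reference. Per the note above, the graph
  # falls back to the router when this key is omitted:
  # entry_point: "router_specialist"
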
# ==============================================================================
# SPECIALIST CONFIGURATION
# ==============================================================================
# Define each specialist agent. The key for each specialist (e.g., 'router_specialist')
# must match the specialist's Python module name (router_specialist.py) and the
# name passed to super().__init__() in its constructor.
# ==============================================================================
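#
# For illustration, a matching specialist module might look like the sketch
# below. This is a hedged sketch, not the scaffold's exact code: the
# 'BaseSpecialist' class name is an assumption and may differ in this codebase.
#
#   # router_specialist.py
#   class RouterSpecialist(BaseSpecialist):
#       def __init__(self):
#           # This string must match the key under 'specialists:' below.
#           super().__init__("router_specialist")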
specialists:
  # --- Orchestration & Planning ---
  router_specialist:
    type: "llm"
    prompt_file: "router_prompt.md"
    description: "The master router and planner. It analyzes the user's request and routes it to the appropriate specialist. Its model should be capable of reliable tool/function calling."

  systems_architect:
    type: "llm"
    prompt_file: "systems_architect_prompt.md"
    description: "Analyzes a user's request and creates a high-level technical plan. Produces a 'system_plan' artifact. This is a good first step for complex tasks."

  # --- Core Capabilities ---
  web_builder:
    type: "llm"
    prompt_file: "web_builder_prompt.md"
    description: "Takes a 'system_plan' artifact and generates a self-contained HTML document based on it. Requires a 'system_plan' to be present in the state."

  code_writer_specialist:
    type: "llm"
    prompt_file: "code_writer_prompt.md"
    description: "A specialist that writes clean, efficient Python code based on a user's request."

  file_specialist:
    type: "llm"
    prompt_file: "file_specialist_prompt.md"
    description: "Performs file system operations (reading and writing files, listing directories) driven by a Pydantic schema. Write operations are protected by a safety lock."
    root_dir: "./workspace" # Defaults to a 'workspace' folder in the project root.

  # --- Data & Analysis ---
  data_extractor_specialist:
    type: "llm"
    prompt_file: "data_extractor_prompt.md" # Use a prompt designed for Pydantic/JSON schema output.
    description: "Extracts structured data from unstructured text based on a Pydantic schema. Requires 'text_to_process' in the state."

  sentiment_classifier_specialist:
    type: "llm"
    prompt_file: "sentiment_classifier_prompt.md"
    description: "Classifies the sentiment of a given text as positive, negative, or neutral."

  data_processor_specialist:
    type: "procedural"
    description: "A specialist that performs deterministic data processing tasks, like formatting or cleaning, without calling an LLM."

  text_analysis_specialist:
    type: "llm"
    prompt_file: "text_analysis_prompt.md"
    description: "Analyzes, summarizes, or extracts information from a block of text. Use this after text has been retrieved by another specialist."

  # --- General & Fallback ---
  prompt_specialist:
    type: "llm"
    prompt_file: "prompt_specialist_prompt.md"
    description: "A general-purpose specialist for direct Q&A and instruction following. It can be bound to a simple or advanced model in user_settings.yaml."

  # --- Wrapped & External Specialists ---
  open_swe_specialist:
    type: "wrapped"
    source: "./external/open-swe/agent/run.py" # Path relative to project root
    class_name: "Agent" # The name of the class to instantiate from the source file.
    description: "A specialist that wraps the open-swe agent for software engineering tasks."

  archiver_specialist:
    type: "procedural"
    archive_path: "./archives" # The directory to save final run reports.
    # Pruning strategy for the archive directory to prevent it from growing indefinitely.
    # Options: "none", "count", "size".
    pruning_strategy: "count"
    pruning_max_count: 50 # Max number of reports to keep if strategy is "count".
    # pruning_max_size_mb: 100 # Max total size in MB if strategy is "size".
    description: "Summarizes the conversation and prepares the final report. This is the final step."

  prompt_triage_specialist:
    type: "llm"
    prompt_file: "prompt_triage_prompt.md"
    description: "A specialist that performs a pre-flight check on the user's initial prompt for sentiment, coherence, and actionability."