-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy path.env.example
More file actions
163 lines (122 loc) · 5.51 KB
/
.env.example
File metadata and controls
163 lines (122 loc) · 5.51 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
# ============================================================================
# Open Science Assistant (OSA) - Environment Configuration
# ============================================================================
# Copy this file to .env and fill in your values
# NEVER commit .env to version control
# ============================================================================
# LLM Provider Configuration
# ============================================================================
# Primary LLM provider: "openrouter" (recommended) or "ollama"
LLM_PROVIDER=openrouter
# OpenRouter API key (required for openrouter provider)
# Get yours at: https://openrouter.ai/keys
OPENROUTER_API_KEY=
# Model selection (OpenRouter model IDs, format: provider/model)
# Examples:
# - openai/gpt-oss-120b (fast with Cerebras provider)
# - openai/gpt-4.1-mini (cheap)
# - anthropic/claude-3-5-sonnet (quality)
# - google/gemini-2.5-flash (cheap)
DEFAULT_MODEL=openai/gpt-oss-120b
# Provider preference for routing (e.g., Cerebras, Azure, Fireworks)
# See: https://openrouter.ai/docs/guides/routing/provider-selection
DEFAULT_MODEL_PROVIDER=Cerebras
# Model for testing (same format as DEFAULT_MODEL)
TEST_MODEL=openai/gpt-oss-120b
TEST_MODEL_PROVIDER=Cerebras
# API key specifically for integration tests (pytest -m llm)
# This is separate from the main key to allow CI/CD testing
OPENROUTER_API_KEY_FOR_TESTING=
# Ollama configuration (for local development)
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=llama3.2
# LLM Temperature (0.0 - 1.0)
LLM_TEMPERATURE=0.1
# ============================================================================
# API Security Configuration
# ============================================================================
# API keys for server authentication (comma-separated list)
# Generate with: python -c "import secrets; print(secrets.token_urlsafe(32))"
API_KEYS=
# Require API key authentication (disable for local dev only)
REQUIRE_API_AUTH=true
# Allow Bring-Your-Own-Key mode (users provide their own OpenRouter key)
ALLOW_BYOK=true
# ============================================================================
# Database Configuration
# ============================================================================
# PostgreSQL connection URL for LangGraph state and telemetry
# Format: postgresql://user:password@host:port/database
DATABASE_URL=postgresql://osa:osa_dev@localhost:5432/osa
# ============================================================================
# LangFuse Observability
# ============================================================================
# LangFuse configuration for tracing and observability
# Get keys at: https://cloud.langfuse.com or self-host
LANGFUSE_PUBLIC_KEY=
LANGFUSE_SECRET_KEY=
LANGFUSE_HOST=https://cloud.langfuse.com
# Environment label for LangFuse (production, staging, development)
LANGFUSE_ENVIRONMENT=development
# ============================================================================
# External API Keys
# ============================================================================
# GitHub API token for issue/PR search and feedback integration
# Scopes needed: public_repo (or repo for private repos)
GITHUB_TOKEN=
# GitHub repository for feedback integration (owner/repo format)
GITHUB_REPO=neuromechanist/osa
# ============================================================================
# Knowledge Sync Configuration
# ============================================================================
# Paper source API keys (optional, for higher rate limits)
# OpenAlex: https://docs.openalex.org/how-to-use-the-api/api-key
# Premium key allows ~2M requests. Major source for paper metadata and citations.
OPENALEX_API_KEY=
# Email for OpenAlex polite pool (fallback if no API key)
OPENALEX_EMAIL=
# Semantic Scholar: https://www.semanticscholar.org/product/api
SEMANTIC_SCHOLAR_API_KEY=
# PubMed/NCBI: https://www.ncbi.nlm.nih.gov/account/settings/
PUBMED_API_KEY=
# Enable/disable automated knowledge sync
SYNC_ENABLED=true
# Sync schedules (cron expressions, UTC timezone)
# GitHub issues/PRs: daily at 2am UTC
SYNC_GITHUB_CRON=0 2 * * *
# Academic papers: weekly on Sunday at 3am UTC
SYNC_PAPERS_CRON=0 3 * * 0
# Data directory for knowledge database (used in Docker deployments)
# Default: platform-specific user data directory
# DATA_DIR=/app/data
# ============================================================================
# API Server Configuration
# ============================================================================
# API host and port
API_HOST=0.0.0.0
API_PORT=38528
# Root path for reverse proxy (e.g., /osa for api.osc.earth/osa/)
ROOT_PATH=/osa
# Dev root path (for api.osc.earth/osa-dev/)
DEV_ROOT_PATH=/osa-dev
# Number of Uvicorn workers (production)
API_WORKERS=4
# CORS: Additional allowed origins (comma-separated)
# Localhost origins are allowed by default in development
EXTRA_CORS_ORIGINS=
# Allow localhost origins for CORS (set to false in production)
ALLOW_LOCALHOST_CORS=true
# ============================================================================
# Telemetry and Feedback
# ============================================================================
# Enable detailed telemetry recording
ENABLE_TELEMETRY=true
# Feedback storage directory
FEEDBACK_DIR=./feedback
# ============================================================================
# Development Settings
# ============================================================================
# Enable debug mode (verbose logging, hot reload)
DEBUG=false
# Log level: DEBUG, INFO, WARNING, ERROR
LOG_LEVEL=INFO