Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion backend/.cursor/rules/run_pipelex.mdc
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ from pipelex.pipelex import Pipelex
from pipelex.pipeline.execute import execute_pipeline
from pipelex.core.stuffs.image_content import ImageContent

from my_project.gantt.gantt_struct import GanttChart
from lesson_forge.gantt.gantt_struct import GanttChart

SAMPLE_NAME = "extract_gantt"
IMAGE_URL = "assets/gantt/gantt_tree_house.png"
Expand Down
2 changes: 1 addition & 1 deletion backend/.cursor/rules/write_pipelex.mdc
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ class Invoice(StructuredContent):
return v
```

**Location:** Create models in `my_project/some_domain/some_domain_struct.py`. Classes inheriting from `StructuredContent` are automatically discovered.
**Location:** Create models in `lesson_forge/some_domain/some_domain_struct.py`. Classes inheriting from `StructuredContent` are automatically discovered.

### Decision Rules for Agents

Expand Down
1 change: 1 addition & 0 deletions backend/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
PIPELEX_INFERENCE_API_KEY=
33 changes: 33 additions & 0 deletions backend/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@

# env
.env

# venv
.venv
venv/

# reports
reports/

# pycache
__pycache__

# Python build artifacts
build/*
*.egg-info/
dist/
*.pyc

# mypy
.mypy_cache/

# Ruff
.ruff_cache/

# Results
results/

# temps
temp/
pipelex_super.toml
base_llm_deck.toml
58 changes: 58 additions & 0 deletions backend/.pipelex/inference/backends.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Inference backend registry: one section per provider.
# NOTE(review): TOML has no variable interpolation — every "${VAR}" below is a
# literal string; the consuming application must substitute the environment
# variable at load time. Confirm the loader does this before relying on it.
# NOTE(review): [pipelex_inference] has no `enabled` key — presumably enabled
# by default; verify the loader's default for missing `enabled`.
[pipelex_inference]
endpoint = "https://inference.pipelex.com/v1"
api_key = "${PIPELEX_INFERENCE_API_KEY}"

[azure_openai]
enabled = false
endpoint = "${AZURE_API_BASE}"
api_key = "${AZURE_API_KEY}"
api_version = "${AZURE_API_VERSION}"

[bedrock]
enabled = false
aws_region = "${AWS_REGION}"

[google]
enabled = false
api_key = "${GOOGLE_API_KEY}"

[vertexai]
enabled = false
gcp_project_id = "${GCP_PROJECT_ID}"
gcp_location = "${GCP_LOCATION}"
gcp_credentials_file_path = "${GCP_CREDENTIALS_FILE_PATH}"

[openai]
enabled = false
api_key = "${OPENAI_API_KEY}"

[anthropic]
enabled = false
api_key = "${ANTHROPIC_API_KEY}"
# Max output tokens cap applied to Claude 4 models; units are tokens.
claude_4_tokens_limit = 8192

[mistral]
enabled = false
api_key = "${MISTRAL_API_KEY}"

[xai]
enabled = false
endpoint = "https://api.x.ai/v1"
api_key = "${XAI_API_KEY}"

# Local Ollama server; no API key required.
[ollama]
enabled = false
endpoint = "http://localhost:11434/v1"

[blackboxai]
enabled = false
endpoint = "https://api.blackbox.ai/v1"
api_key = "${BLACKBOX_API_KEY}"

[fal]
enabled = false
api_key = "${FAL_API_KEY}"

[internal] # software-only backend, runs internally, without AI
enabled = true

88 changes: 88 additions & 0 deletions backend/.pipelex/inference/backends/anthropic.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
################################################################################
# Anthropic Backend Configuration
################################################################################
#
# This file defines the model specifications for Anthropic Claude models.
# It contains model definitions for various Claude language models
# accessible through the Anthropic API.
#
# Configuration structure:
# - Each model is defined in its own section with the model name as the header
# - Headers with dots must be quoted (e.g., ["claude-3.5-sonnet"])
# - Model costs are in USD per million tokens (input/output)
#
################################################################################

################################################################################
# MODEL DEFAULTS
################################################################################

[defaults]
model_type = "llm"
sdk = "anthropic"
prompting_target = "anthropic"

################################################################################
# LANGUAGE MODELS
################################################################################

# --- Claude 3 Series ----------------------------------------------------------
[claude-3-haiku]
model_id = "claude-3-haiku-20240307"
max_tokens = 4096
inputs = ["text", "images"]
outputs = ["text", "structured"]
max_prompt_images = 100
costs = { input = 0.25, output = 1.25 }

[claude-3-opus]
model_id = "claude-3-opus-20240229"
max_tokens = 4096
inputs = ["text", "images"]
outputs = ["text", "structured"]
max_prompt_images = 100
costs = { input = 15.0, output = 75.0 }

# --- Claude 3.7 Series --------------------------------------------------------
["claude-3.7-sonnet"]
model_id = "claude-3-7-sonnet-20250219"
max_tokens = 8192
inputs = ["text", "images"]
outputs = ["text", "structured"]
max_prompt_images = 100
costs = { input = 3.0, output = 15.0 }

# --- Claude 4 Series ----------------------------------------------------------
[claude-4-sonnet]
model_id = "claude-sonnet-4-20250514"
max_tokens = 64000
inputs = ["text", "images"]
outputs = ["text", "structured"]
max_prompt_images = 100
costs = { input = 3.0, output = 15.0 }

[claude-4-opus]
model_id = "claude-opus-4-20250514"
max_tokens = 32000
inputs = ["text", "images"]
outputs = ["text", "structured"]
max_prompt_images = 100
# FIX: Opus-tier pricing is $15/$75 per MTok (matches claude-3-opus above);
# the previous { 3.0, 15.0 } was the Sonnet tier, a 5x understatement.
costs = { input = 15.0, output = 75.0 }

# --- Claude 4.1 Series --------------------------------------------------------
["claude-4.1-opus"]
model_id = "claude-opus-4-1-20250805"
max_tokens = 32000
inputs = ["text", "images"]
outputs = ["text", "structured"]
max_prompt_images = 100
# FIX: Opus 4.1 shares the Opus tier at $15/$75 per MTok; was mistakenly
# listed at Sonnet pricing.
costs = { input = 15.0, output = 75.0 }

# --- Claude 4.5 Series --------------------------------------------------------
["claude-4.5-sonnet"]
model_id = "claude-sonnet-4-5-20250929"
max_tokens = 64000
inputs = ["text", "images"]
outputs = ["text", "structured"]
max_prompt_images = 100
costs = { input = 3.0, output = 15.0 }
98 changes: 98 additions & 0 deletions backend/.pipelex/inference/backends/azure_openai.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
################################################################################
# Azure OpenAI Backend Configuration
################################################################################
#
# This file defines the model specifications for Azure OpenAI models.
# It contains model definitions for OpenAI models deployed on Azure
# accessible through the Azure OpenAI API.
#
# Configuration structure:
# - Each model is defined in its own section with the model name as the header
# - Headers with dots must be quoted (e.g., ["gpt-4.1"])
# - Model costs are in USD per million tokens (input/output)
#
################################################################################

################################################################################
# MODEL DEFAULTS
################################################################################

[defaults]
model_type = "llm"
sdk = "azure_openai"
prompting_target = "openai"

################################################################################
# LANGUAGE MODELS
################################################################################

# --- GPT-4o Series ------------------------------------------------------------
[gpt-4o]
model_id = "gpt-4o-2024-11-20"
inputs = ["text", "images"]
outputs = ["text", "structured"]
costs = { input = 2.5, output = 10.0 }

[gpt-4o-mini]
model_id = "gpt-4o-mini-2024-07-18"
inputs = ["text", "images"]
outputs = ["text", "structured"]
costs = { input = 0.15, output = 0.6 }

# --- GPT-4.1 Series -----------------------------------------------------------
["gpt-4.1"]
model_id = "gpt-4.1-2025-04-14"
inputs = ["text", "images"]
outputs = ["text", "structured"]
# FIX: use float costs for type consistency with every other entry in this
# file (was bare integers 2 / 8, which parse as a different TOML type).
costs = { input = 2.0, output = 8.0 }

["gpt-4.1-mini"]
model_id = "gpt-4.1-mini-2025-04-14"
inputs = ["text", "images"]
outputs = ["text", "structured"]
costs = { input = 0.4, output = 1.6 }

["gpt-4.1-nano"]
model_id = "gpt-4.1-nano-2025-04-14"
inputs = ["text", "images"]
outputs = ["text", "structured"]
costs = { input = 0.1, output = 0.4 }

# --- o Series ----------------------------------------------------------------
# NOTE(review): o1-mini and o3-mini list text-only inputs, unlike o1 —
# presumably deliberate (no vision support); verify against deployment specs.
[o1-mini]
model_id = "o1-mini-2024-09-12"
inputs = ["text"]
outputs = ["text", "structured"]
costs = { input = 3.0, output = 12.0 }

[o1]
model_id = "o1-2024-12-17"
inputs = ["text", "images"]
outputs = ["text", "structured"]
costs = { input = 15.0, output = 60.0 }

[o3-mini]
model_id = "o3-mini-2025-01-31"
inputs = ["text"]
outputs = ["text", "structured"]
costs = { input = 1.1, output = 4.4 }

# --- GPT-5 Series -------------------------------------------------------------
[gpt-5-mini]
model_id = "gpt-5-mini-2025-08-07"
inputs = ["text", "images"]
outputs = ["text", "structured"]
costs = { input = 0.25, output = 2.0 }

[gpt-5-nano]
model_id = "gpt-5-nano-2025-08-07"
inputs = ["text", "images"]
outputs = ["text", "structured"]
costs = { input = 0.05, output = 0.4 }

[gpt-5-chat]
model_id = "gpt-5-chat-2025-08-07"
inputs = ["text", "images"]
outputs = ["text", "structured"]
costs = { input = 1.25, output = 10.0 }

Loading
Loading