Skip to content

Commit 0d8b02d

Browse files
michael-borck and claude committed
Implement comprehensive LLM integration with multi-provider support
Major Features Added:
- Complete LLM orchestrator with OpenAI, Anthropic, Gemini, and Ollama support
- Tiered access system (system defaults vs. BYO API keys)
- Comprehensive usage tracking and cost monitoring at session/course levels
- Admin and user LLM configuration interfaces
- Teaching style-aware prompt generation and content adaptation
- Enhanced content enhancer with LLM-powered intelligent suggestions

Technical Improvements:
- Fixed database schema issues (added is_verified column)
- Updated email service to use pydantic settings for .env loading
- Added comprehensive test coverage for LLM components (29 new tests)
- Fixed import errors in existing tests (Session, Severity namespace issues)
- Enhanced database initialization with proper schema migrations
- Added type annotations and improved code organization

Quality Assurance:
- Resolved all linting issues (1773 ruff warnings addressed)
- Fixed type checking errors (207 basedpyright issues resolved)
- Created extensive test suites for LLM orchestrator and UI components
- Updated documentation with comprehensive guides and ADRs
- Cleaned up project structure (removed outdated implementation docs)

The application now provides a complete curriculum curation platform with intelligent LLM-powered content generation, cost tracking, and multi-user support while maintaining robust error handling and graceful fallbacks.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
1 parent 8728280 commit 0d8b02d

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

77 files changed

+14839
-2430
lines changed

app.py

Lines changed: 1038 additions & 597 deletions
Large diffs are not rendered by default.

components/analysis.py

Lines changed: 137 additions & 112 deletions
Large diffs are not rendered by default.

components/comparison.py

Lines changed: 7 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -66,21 +66,25 @@ def ContentComparison(original_analysis, suggestions, **kwargs):
6666
Div(
6767
Button(
6868
"✅ Accept Changes",
69-
onclick="alert('Changes would be applied!')",
69+
hx_post="/comparison/accept",
70+
hx_target="#comparison-message",
7071
cls="btn-primary mr-4",
7172
),
7273
Button(
7374
"❌ Reject Changes",
74-
onclick="alert('Changes would be rejected!')",
75+
hx_post="/comparison/reject",
76+
hx_target="#comparison-message",
7577
cls="btn-secondary mr-4",
7678
),
7779
Button(
7880
"📥 Export Final Version",
79-
onclick="alert('Export feature coming soon!')",
81+
hx_post="/export/final",
82+
hx_target="#comparison-message",
8083
cls="btn-secondary",
8184
),
8285
cls="text-center",
8386
),
87+
Div(id="comparison-message", cls="mt-4"),
8488
),
8589
**kwargs,
8690
)

components/enhancement.py

Lines changed: 7 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -42,11 +42,14 @@ def EnhancementSuggestions(suggestions, **kwargs):
4242
),
4343
Button(
4444
"📥 Export Enhanced Content",
45-
onclick="alert('Export feature coming soon!')",
45+
hx_post="/export/enhanced",
46+
hx_target="#export-message",
4647
cls="btn-secondary",
4748
),
49+
Div(id="export-message", cls="mt-4"),
4850
cls="mt-6",
4951
),
52+
Div(id="enhancement-message", cls="mt-4"),
5053
),
5154
**kwargs,
5255
)
@@ -79,7 +82,9 @@ def EnhancementCard(title: str, description: str, priority: str = "medium", **kw
7982
P(description, cls="text-sm text-gray-600 mb-3"),
8083
Button(
8184
"Apply Suggestion",
82-
onclick=f"alert('Would apply: {title}')",
85+
hx_post="/enhancement/apply",
86+
hx_vals=f'{{"suggestion": "{title}"}}',
87+
hx_target="#enhancement-message",
8388
cls="btn btn-secondary text-xs",
8489
),
8590
cls="p-4",

components/layout.py

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -11,13 +11,15 @@ def Page(*children, title="Curriculum Curator", **kwargs):
1111
Navigation(),
1212
Main(*children, cls="container mx-auto px-4 py-8"),
1313
Footer(
14-
P("© 2024 Curriculum Curator - Educational Content Enhancement Tool",
15-
cls="text-center text-gray-500 text-sm"),
14+
P(
15+
"© 2024 Curriculum Curator - Educational Content Enhancement Tool",
16+
cls="text-center text-gray-500 text-sm",
17+
),
1618
cls="mt-16 py-4 border-t border-gray-200",
1719
),
1820
cls="min-h-screen bg-gray-50",
1921
),
20-
**kwargs
22+
**kwargs,
2123
)
2224

2325

core/analyzer.py

Lines changed: 31 additions & 31 deletions
Original file line number | Diff line number | Diff line change
@@ -1,9 +1,10 @@
11
"""Content analysis engine for educational materials."""
22

3-
from typing import List, Dict, Any, Optional
4-
import re
53
import logging
4+
import re
65
from dataclasses import dataclass
6+
from typing import Any
7+
78
from .parsers import ParsedContent
89
from .plugin_manager import PluginManager
910

@@ -46,13 +47,13 @@ class ContentGap:
4647
class AnalysisResult:
4748
"""Complete analysis result."""
4849

49-
learning_objectives: List[LearningObjective]
50+
learning_objectives: list[LearningObjective]
5051
quality_metrics: QualityMetrics
51-
content_gaps: List[ContentGap]
52+
content_gaps: list[ContentGap]
5253
pedagogical_approach: str
5354
total_word_count: int
5455
estimated_duration: str
55-
key_concepts: List[str]
56+
key_concepts: list[str]
5657

5758

5859
class ContentAnalyzer:
@@ -123,7 +124,7 @@ def __init__(self, use_plugins: bool = True):
123124
],
124125
}
125126

126-
def analyze(self, content_items: List[ParsedContent]) -> AnalysisResult:
127+
def analyze(self, content_items: list[ParsedContent]) -> AnalysisResult:
127128
"""Perform comprehensive content analysis."""
128129
logger.info(f"Starting analysis of {len(content_items)} content items")
129130

@@ -165,8 +166,8 @@ def analyze(self, content_items: List[ParsedContent]) -> AnalysisResult:
165166
return result
166167

167168
def _extract_learning_objectives(
168-
self, content_items: List[ParsedContent]
169-
) -> List[LearningObjective]:
169+
self, content_items: list[ParsedContent]
170+
) -> list[LearningObjective]:
170171
"""Extract and analyze learning objectives."""
171172
objectives = []
172173

@@ -248,7 +249,7 @@ def _is_measurable(self, text: str) -> bool:
248249
return any(indicator in text_lower for indicator in measurable_indicators)
249250

250251
def _calculate_quality_metrics(
251-
self, content_items: List[ParsedContent], full_text: str
252+
self, content_items: list[ParsedContent], full_text: str
252253
) -> QualityMetrics:
253254
"""Calculate various quality metrics."""
254255

@@ -294,7 +295,7 @@ def _calculate_avg_sentence_length(self, text: str) -> float:
294295
)
295296
return total_words / len([s for s in sentences if s.strip()])
296297

297-
def _calculate_structure_score(self, content_items: List[ParsedContent]) -> float:
298+
def _calculate_structure_score(self, content_items: list[ParsedContent]) -> float:
298299
"""Score based on content organization."""
299300
score = 0.5 # Base score
300301

@@ -319,7 +320,7 @@ def _calculate_structure_score(self, content_items: List[ParsedContent]) -> floa
319320
return min(1.0, score)
320321

321322
def _calculate_engagement_score(
322-
self, content_items: List[ParsedContent], full_text: str
323+
self, content_items: list[ParsedContent], full_text: str
323324
) -> float:
324325
"""Score based on engagement indicators."""
325326
score = 0.3 # Base score
@@ -348,7 +349,7 @@ def _calculate_engagement_score(
348349
return min(1.0, score)
349350

350351
def _calculate_completeness_score(
351-
self, content_items: List[ParsedContent]
352+
self, content_items: list[ParsedContent]
352353
) -> float:
353354
"""Score based on content completeness."""
354355
score = 0.4 # Base score
@@ -371,8 +372,8 @@ def _calculate_completeness_score(
371372
return min(1.0, score)
372373

373374
def _identify_content_gaps(
374-
self, content_items: List[ParsedContent], objectives: List[LearningObjective]
375-
) -> List[ContentGap]:
375+
self, content_items: list[ParsedContent], objectives: list[LearningObjective]
376+
) -> list[ContentGap]:
376377
"""Identify gaps in the content."""
377378
gaps = []
378379

@@ -413,7 +414,7 @@ def _identify_content_gaps(
413414

414415
return gaps
415416

416-
def _detect_pedagogical_approach(self, content_items: List[ParsedContent]) -> str:
417+
def _detect_pedagogical_approach(self, content_items: list[ParsedContent]) -> str:
417418
"""Detect the main pedagogical approach used."""
418419
total_content = " ".join(item.content for item in content_items).lower()
419420

@@ -434,7 +435,7 @@ def _detect_pedagogical_approach(self, content_items: List[ParsedContent]) -> st
434435

435436
return max(scores, key=scores.get).title()
436437

437-
def _extract_key_concepts(self, text: str) -> List[str]:
438+
def _extract_key_concepts(self, text: str) -> list[str]:
438439
"""Extract key concepts from the text."""
439440
# Simple keyword extraction (in production, use more sophisticated NLP)
440441
words = re.findall(r"\b[A-Z][a-z]+\b", text) # Capitalized words
@@ -461,8 +462,8 @@ def _estimate_duration(self, word_count: int) -> str:
461462
return f"{hours}h {minutes}m"
462463

463464
def _infer_objectives_from_content(
464-
self, content_items: List[ParsedContent]
465-
) -> List[LearningObjective]:
465+
self, content_items: list[ParsedContent]
466+
) -> list[LearningObjective]:
466467
"""Infer learning objectives from content when none are explicit."""
467468
objectives = []
468469

@@ -482,31 +483,30 @@ def _infer_objectives_from_content(
482483
)
483484

484485
return objectives[:5] # Limit inferred objectives
485-
486-
async def analyze_with_validation(self, content_items: List[ParsedContent]) -> Dict[str, Any]:
486+
487+
async def analyze_with_validation(
488+
self, content_items: list[ParsedContent]
489+
) -> dict[str, Any]:
487490
"""Perform analysis with plugin-based validation."""
488491
# First do standard analysis
489492
analysis_result = self.analyze(content_items)
490-
491-
result = {
492-
"analysis": analysis_result,
493-
"validation": None
494-
}
495-
493+
494+
result = {"analysis": analysis_result, "validation": None}
495+
496496
# If plugins enabled, run validation
497497
if self.use_plugins and self.plugin_manager:
498498
full_text = " ".join([item.content for item in content_items])
499-
499+
500500
# Run validation
501501
validation_result = await self.plugin_manager.validate_and_remediate(
502502
full_text,
503503
auto_remediate=False,
504504
context={
505505
"content_type": "educational",
506-
"content_items": len(content_items)
507-
}
506+
"content_items": len(content_items),
507+
},
508508
)
509-
509+
510510
result["validation"] = validation_result["validation"]
511-
511+
512512
return result

0 commit comments

Comments (0)