Skip to content

Commit e822e68

Browse files
sanggggg and Claude authored
Make Analytics QualitativeOutput configurable via JSON (#82)
* refactor: rename QualitativeOutput to AIQualitativeOutput with configurable entries - Create qualitative_entries.json resource file for configurable entry definitions - Add QualitativeEntry and QualitativeEntryList models similar to rubric system - Rename QualitativeOutput to AIQualitativeOutput with dynamic HashMap-based entries - Update prompt generation to use configurable entries from resource file - Add convenience methods for backward compatibility (insights(), good_patterns(), etc.) - Update all references across codebase (formatters, TUI, database, tests) - Update TypeScript types in ui-react to match new structure Each qualitative entry now has: key, title, description, item_schema, min_items, max_items This allows adding new analysis categories by modifying qualitative_entries.json * refactor: simplify AIQualitativeOutput to use markdown strings with per-entry LLM requests - Remove item_schema from qualitative entries - use simple markdown string output - Split qualitative analysis into multiple LLM requests (one per entry type) - Change AIQualitativeOutput.entries from HashMap<String, Vec<JsonValue>> to HashMap<String, Vec<String>> - Each qualitative item is now a single markdown line for simplicity and quality - Update formatters and TUI to work with new string-based output - Remove unused typed structs (Insight, GoodPattern, etc.) from services - Update TypeScript types to use string[] for entries This follows the same pattern as rubric evaluation where each entry type gets its own focused LLM request for better quality output. * refactor: remove min_items and max_items from qualitative entries Simplify the QualitativeEntry configuration by removing unnecessary min_items and max_items fields. The LLM will determine appropriate output length based on context without artificial constraints. * fix * fix ci --------- Co-authored-by: Claude <noreply@anthropic.com>
1 parent 090cef1 commit e822e68

File tree

14 files changed

+394
-350
lines changed

14 files changed

+394
-350
lines changed

.github/workflows/ci.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ jobs:
2727
runs-on: ubuntu-latest
2828
steps:
2929
- uses: actions/checkout@v4
30-
- uses: dtolnay/rust-toolchain@1.88.0
30+
- uses: dtolnay/rust-toolchain@1.91.1
3131
with:
3232
components: clippy
3333
- uses: Swatinem/rust-cache@v2
@@ -39,7 +39,7 @@ jobs:
3939
runs-on: ubuntu-latest
4040
steps:
4141
- uses: actions/checkout@v4
42-
- uses: dtolnay/rust-toolchain@1.88.0
42+
- uses: dtolnay/rust-toolchain@1.91.1
4343
with:
4444
components: rustfmt
4545
- name: Run rustfmt
@@ -50,7 +50,7 @@ jobs:
5050
runs-on: ubuntu-latest
5151
steps:
5252
- uses: actions/checkout@v4
53-
- uses: dtolnay/rust-toolchain@1.88.0
53+
- uses: dtolnay/rust-toolchain@1.91.1
5454
- uses: Swatinem/rust-cache@v2
5555
- name: Check compilation
5656
run: cargo check --verbose

mise.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
[tools]
2+
rust = "1.91.1"

resources/qualitative_entries.json

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
{
2+
"version": "1.0",
3+
"entries": [
4+
{
5+
"key": "insights",
6+
"title": "Insights",
7+
"description": "Key observations about the development patterns, communication style, and problem-solving approach"
8+
},
9+
{
10+
"key": "good_patterns",
11+
"title": "Good Patterns",
12+
"description": "Positive habits and practices observed in how the user interacts with the AI"
13+
},
14+
{
15+
"key": "improvement_areas",
16+
"title": "Improvement Areas",
17+
"description": "Areas where the user could enhance their workflow or communication"
18+
},
19+
{
20+
"key": "recommendations",
21+
"title": "Recommendations",
22+
"description": "Actionable suggestions for improvement with specific steps"
23+
},
24+
{
25+
"key": "learning_observations",
26+
"title": "Learning Observations",
27+
"description": "Growth and learning indicators based on what the user was working on"
28+
}
29+
]
30+
}

src/database/analytics_repo.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -453,7 +453,7 @@ impl AnalyticsRepository {
453453
serde_json::from_str(&row.scores_json).context("Failed to deserialize scores")?;
454454
let metrics: crate::models::Metrics =
455455
serde_json::from_str(&row.metrics_json).context("Failed to deserialize metrics")?;
456-
let qualitative_output: crate::services::analytics::QualitativeOutput =
456+
let qualitative_output: crate::services::analytics::AIQualitativeOutput =
457457
serde_json::from_str(&row.qualitative_output_json)
458458
.context("Failed to deserialize qualitative_output")?;
459459
let processed_output: crate::services::analytics::ProcessedQuantitativeOutput =
@@ -525,7 +525,7 @@ impl AnalyticsRepository {
525525
serde_json::from_str(&row.scores_json).context("Failed to deserialize scores")?;
526526
let metrics: crate::models::Metrics =
527527
serde_json::from_str(&row.metrics_json).context("Failed to deserialize metrics")?;
528-
let qualitative_output: crate::services::analytics::QualitativeOutput =
528+
let qualitative_output: crate::services::analytics::AIQualitativeOutput =
529529
serde_json::from_str(&row.qualitative_output_json)
530530
.context("Failed to deserialize qualitative_output")?;
531531
let processed_output: crate::services::analytics::ProcessedQuantitativeOutput =

src/models/analytics.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ use uuid::Uuid;
44

55
// Re-export types from services that will be stored as JSON
66
use crate::services::analytics::{
7-
AIQuantitativeOutput, ProcessedQuantitativeOutput, QualitativeOutput, QuantitativeOutput,
7+
AIQualitativeOutput, AIQuantitativeOutput, ProcessedQuantitativeOutput, QuantitativeOutput,
88
};
99

1010
// =============================================================================
@@ -42,7 +42,7 @@ pub struct Analytics {
4242
pub scores: Scores,
4343
pub metrics: Metrics,
4444

45-
pub qualitative_output: QualitativeOutput,
45+
pub qualitative_output: AIQualitativeOutput,
4646
pub processed_output: ProcessedQuantitativeOutput,
4747
pub ai_quantitative_output: AIQuantitativeOutput,
4848

@@ -57,7 +57,7 @@ impl Analytics {
5757
analytics_request_id: String,
5858
session_id: String,
5959
quantitative_output: QuantitativeOutput,
60-
qualitative_output: QualitativeOutput,
60+
qualitative_output: AIQualitativeOutput,
6161
processed_output: ProcessedQuantitativeOutput,
6262
ai_quantitative_output: AIQuantitativeOutput,
6363
metrics: Metrics,

src/services/analytics/ai_analysis.rs

Lines changed: 102 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,14 @@
11
use super::models::{
2-
QualitativeInput, QualitativeOutput, QuantitativeInput, QuantitativeOutput, Rubric,
3-
RubricEvaluationSummary, RubricList, RubricScore,
2+
AIQualitativeOutput, QualitativeEntry, QualitativeEntryList, QualitativeInput,
3+
QuantitativeInput, QuantitativeOutput, Rubric, RubricEvaluationSummary, RubricList,
4+
RubricScore,
45
};
56
use crate::models::message::MessageType;
67
use crate::models::{Message, MessageRole};
78
use crate::services::google_ai::GoogleAiClient;
89
use anyhow::Result;
910
use regex::Regex;
11+
use std::collections::HashMap;
1012

1113
// =============================================================================
1214
// AI Analysis Functions
@@ -31,17 +33,52 @@ pub async fn generate_quantitative_analysis_ai(
3133
pub async fn generate_qualitative_analysis_ai(
3234
qualitative_input: &QualitativeInput,
3335
ai_client: &GoogleAiClient,
34-
) -> Result<QualitativeOutput> {
35-
let prompt = build_qualitative_analysis_prompt(qualitative_input);
36+
entries: Option<&QualitativeEntryList>,
37+
) -> Result<AIQualitativeOutput> {
38+
// Use provided entries or load defaults
39+
let entry_list = match entries {
40+
Some(e) => e.clone(),
41+
None => QualitativeEntryList::default_entries(),
42+
};
43+
44+
// Process each entry type with a separate LLM request for better quality
45+
let mut all_entries: HashMap<String, Vec<String>> = HashMap::new();
46+
47+
for entry in &entry_list.entries {
48+
match generate_single_entry(qualitative_input, entry, ai_client).await {
49+
Ok(items) => {
50+
all_entries.insert(entry.key.clone(), items);
51+
}
52+
Err(e) => {
53+
tracing::warn!("Failed to generate entry {}: {}", entry.key, e);
54+
// Add empty entry on error
55+
all_entries.insert(entry.key.clone(), Vec::new());
56+
}
57+
}
58+
}
59+
60+
Ok(AIQualitativeOutput::new(
61+
all_entries,
62+
entry_list.version.clone(),
63+
))
64+
}
65+
66+
/// Generate a single qualitative entry type with its own LLM request
67+
async fn generate_single_entry(
68+
qualitative_input: &QualitativeInput,
69+
entry: &QualitativeEntry,
70+
ai_client: &GoogleAiClient,
71+
) -> Result<Vec<String>> {
72+
let prompt = build_single_entry_prompt(qualitative_input, entry);
3673

3774
let analysis_request = crate::services::google_ai::models::AnalysisRequest {
3875
prompt,
39-
max_tokens: Some(3072),
76+
max_tokens: Some(1024),
4077
temperature: Some(0.7),
4178
};
4279

4380
let response = ai_client.analytics(analysis_request).await?;
44-
parse_qualitative_response(&response.text)
81+
parse_entry_response(&response.text, entry)
4582
}
4683

4784
// =============================================================================
@@ -131,76 +168,41 @@ Important: Return ONLY the JSON object, no additional text or explanation."#,
131168
)
132169
}
133170

134-
fn build_qualitative_analysis_prompt(input: &QualitativeInput) -> String {
171+
/// Build a prompt for a single qualitative entry type
172+
fn build_single_entry_prompt(input: &QualitativeInput, entry: &QualitativeEntry) -> String {
135173
format!(
136-
r#"Analyze the following development session and provide qualitative insights.
174+
r#"Analyze the following development session and provide {title}.
137175
138176
## Full Session Transcript (JSON)
139177
140178
The following is a complete transcript of the user's conversation with an AI coding assistant.
141179
Each turn includes the message content and any tool uses (file reads, writes, edits, bash commands, etc.).
142180
143181
```json
144-
{}
182+
{session}
145183
```
146184
147185
## Task
148186
149-
Based on the complete session transcript above, provide a comprehensive qualitative analysis with:
187+
{entry_description}
150188
151-
1. **Insights**: Key observations about the development patterns, communication style, and problem-solving approach (2-4 insights)
152-
2. **Good Patterns**: Positive habits and practices observed in how the user interacts with the AI (1-3 patterns)
153-
3. **Improvement Areas**: Areas where the user could enhance their workflow or communication (1-3 areas)
154-
4. **Recommendations**: Actionable suggestions for improvement (2-3 recommendations)
155-
5. **Learning Observations**: Growth and learning indicators based on what the user was working on (1-2 observations)
189+
Each item should be a single, concise markdown line that captures one specific observation.
156190
157-
Return ONLY a valid JSON object with this exact structure:
158-
{{
159-
"insights": [
160-
{{
161-
"title": "string",
162-
"description": "string",
163-
"category": "string (Productivity/Technical/Learning/Collaboration)",
164-
"confidence": 0.0
165-
}}
166-
],
167-
"good_patterns": [
168-
{{
169-
"pattern_name": "string",
170-
"description": "string",
171-
"frequency": 1,
172-
"impact": "string (High/Medium/Low - description)"
173-
}}
174-
],
175-
"improvement_areas": [
176-
{{
177-
"area_name": "string",
178-
"current_state": "string",
179-
"suggested_improvement": "string",
180-
"expected_impact": "string",
181-
"priority": "string (High/Medium/Low)"
182-
}}
183-
],
184-
"recommendations": [
185-
{{
186-
"title": "string",
187-
"description": "string",
188-
"impact_score": 0.0,
189-
"implementation_difficulty": "string (Easy/Medium/Hard)"
190-
}}
191-
],
192-
"learning_observations": [
193-
{{
194-
"observation": "string",
195-
"skill_area": "string",
196-
"progress_indicator": "string",
197-
"next_steps": ["string"]
198-
}}
199-
]
200-
}}
191+
## Required Output Format
201192
202-
Important: Return ONLY the JSON object, no additional text or explanation."#,
203-
input.raw_session
193+
Return ONLY a numbered list of items, one per line. Each line should be a complete, self-contained observation.
194+
195+
Example format:
196+
1. **Observation title**: Brief description of the observation with specific details.
197+
2. **Another observation**: Another specific point with supporting evidence.
198+
199+
Important:
200+
- Return ONLY the numbered list, no additional text, headers, or explanation.
201+
- Each item must be a single line of markdown text.
202+
- Focus on specific, actionable observations from the session."#,
203+
title = entry.title.to_lowercase(),
204+
session = input.raw_session,
205+
entry_description = entry.format_for_prompt(),
204206
)
205207
}
206208

@@ -226,18 +228,49 @@ fn parse_quantitative_response(response_text: &str) -> Result<QuantitativeOutput
226228
}
227229
}
228230

229-
fn parse_qualitative_response(response_text: &str) -> Result<QualitativeOutput> {
230-
// Try to extract JSON from the response
231-
let json_text = extract_json_from_text(response_text);
231+
/// Parse the LLM response for a single entry type
232+
/// Expects a numbered list of markdown lines
233+
fn parse_entry_response(response_text: &str, entry: &QualitativeEntry) -> Result<Vec<String>> {
234+
let mut items = Vec::new();
235+
236+
// Parse numbered list items (e.g., "1. ...", "2. ...")
237+
let numbered_re = Regex::new(r"^\s*\d+\.\s*(.+)$").unwrap();
238+
239+
for line in response_text.lines() {
240+
let trimmed = line.trim();
241+
if trimmed.is_empty() {
242+
continue;
243+
}
232244

233-
serde_json::from_str::<QualitativeOutput>(&json_text).map_err(|e| {
245+
// Check if it's a numbered list item
246+
if let Some(caps) = numbered_re.captures(trimmed) {
247+
if let Some(content) = caps.get(1) {
248+
let item = content.as_str().trim().to_string();
249+
if !item.is_empty() {
250+
items.push(item);
251+
}
252+
}
253+
} else if trimmed.starts_with("- ") || trimmed.starts_with("* ") {
254+
// Also handle bullet points
255+
let item = trimmed[2..].trim().to_string();
256+
if !item.is_empty() {
257+
items.push(item);
258+
}
259+
} else if trimmed.starts_with("**") {
260+
// Handle lines that start with bold text (common in markdown)
261+
items.push(trimmed.to_string());
262+
}
263+
}
264+
265+
if items.is_empty() {
234266
tracing::warn!(
235-
"Failed to parse AI response as JSON: {}. Response: {}",
236-
e,
267+
"No items parsed for entry {}, response: {}",
268+
entry.key,
237269
response_text
238270
);
239-
anyhow::anyhow!("Failed to parse AI qualitative response: {}", e)
240-
})
271+
}
272+
273+
Ok(items)
241274
}
242275

243276
fn extract_json_from_text(text: &str) -> String {

0 commit comments

Comments (0)