diff --git a/- b/- new file mode 100644 index 000000000..25738e52d --- /dev/null +++ b/- @@ -0,0 +1,9 @@ +{ + "access_token": "REDACTED", + "expires_in": 1209600, + "token_type": "Bearer", + "scope": "export", + "refresh_token": "REDACTED", + "expires_at": "2026-03-20T15:10:13.805595", + "saved_at": "2026-03-06T15:10:13.805637" +} \ No newline at end of file diff --git a/.cli_token b/.cli_token new file mode 100644 index 000000000..d42e54e6c --- /dev/null +++ b/.cli_token @@ -0,0 +1,9 @@ +{ + "access_token": "REDACTED", + "expires_in": 1209600, + "token_type": "Bearer", + "scope": "export", + "refresh_token": "REDACTED", + "expires_at": "2026-03-20T23:20:28.558342", + "saved_at": "2026-03-06T23:20:28.558354" +} \ No newline at end of file diff --git a/commcare_connect/workflow/tests/e2e/conftest.py b/commcare_connect/workflow/tests/e2e/conftest.py index 36d9d19a0..6d52cbc9e 100644 --- a/commcare_connect/workflow/tests/e2e/conftest.py +++ b/commcare_connect/workflow/tests/e2e/conftest.py @@ -71,7 +71,7 @@ def live_server_url(): server_log = open("e2e_django_server.log", "w") proc = subprocess.Popen( - [sys.executable, "manage.py", "runserver", f"{E2E_HOST}:{E2E_PORT}"], + [sys.executable, "manage.py", "runserver", f"{E2E_HOST}:{E2E_PORT}", "--noreload"], stdout=server_log, stderr=subprocess.STDOUT, ) diff --git a/docs/plans/2026-03-02-solicitations-new.md b/docs/plans/2026-03-02-solicitations-new.md new file mode 100644 index 000000000..4e35fa887 --- /dev/null +++ b/docs/plans/2026-03-02-solicitations-new.md @@ -0,0 +1,2725 @@ +# Solicitations New — Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. 
+ +**Goal:** Build a public-facing solicitations system (`solicitations_new`) where program managers post RFPs/EOIs, respondents submit as LLOEntities, and managers review — all backed by LabsRecord API with Django UI, JSON API, and MCP tool consumers on a shared data_access layer. + +**Architecture:** Thin API Layer (Approach A). One `data_access.py` module contains all business logic and talks to LabsRecord API. Three consumers call into it: Django template views (server-rendered HTML), JSON API views (simple Django views returning JSON), and MCP tools. No local Django ORM storage. + +**Tech Stack:** Django 4.x, LabsRecordAPIClient, Crispy Forms + Tailwind, Alpine.js (dynamic question builder), httpx, pytest + unittest.mock + +**Design doc:** `docs/plans/2026-03-02-solicitations-new-design.md` + +--- + +## Task 1: App Scaffolding & Registration + +**Files:** +- Create: `commcare_connect/solicitations_new/__init__.py` +- Create: `commcare_connect/solicitations_new/apps.py` +- Create: `commcare_connect/solicitations_new/models.py` (empty placeholder) +- Create: `commcare_connect/solicitations_new/urls.py` (minimal) +- Create: `commcare_connect/solicitations_new/views.py` (minimal) +- Modify: `config/settings/base.py` (~line 155, LOCAL_APPS list) +- Modify: `config/urls.py` (~line 45, urlpatterns) +- Modify: `commcare_connect/labs/middleware.py` (~line 87, WHITELISTED_PREFIXES) + +**Step 1: Create the app directory and files** + +```python +# commcare_connect/solicitations_new/__init__.py +# (empty) +``` + +```python +# commcare_connect/solicitations_new/apps.py +from django.apps import AppConfig + + +class SolicitationsNewConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "commcare_connect.solicitations_new" +``` + +```python +# commcare_connect/solicitations_new/models.py +# Proxy models defined in Task 2 +``` + +```python +# commcare_connect/solicitations_new/views.py +from django.http import JsonResponse + + +def 
health_check(request): + return JsonResponse({"status": "ok", "app": "solicitations_new"}) +``` + +```python +# commcare_connect/solicitations_new/urls.py +from django.urls import path + +from . import views + +app_name = "solicitations_new" + +urlpatterns = [ + path("health/", views.health_check, name="health_check"), +] +``` + +**Step 2: Register the app in settings, URLs, and middleware** + +In `config/settings/base.py`, add to `LOCAL_APPS` list (after `commcare_connect.solicitations`): +```python +"commcare_connect.solicitations_new", +``` + +In `config/urls.py`, add to `urlpatterns` (after the solicitations line): +```python +path("solicitations_new/", include("commcare_connect.solicitations_new.urls", namespace="solicitations_new")), +``` + +In `commcare_connect/labs/middleware.py`, add to `WHITELISTED_PREFIXES`: +```python +"/solicitations_new/", +``` + +**Step 3: Verify the app loads** + +Run: `python manage.py check --deploy 2>&1 | head -5` +Expected: No errors related to solicitations_new + +Run: `python manage.py shell -c "from commcare_connect.solicitations_new import views; print('OK')"` +Expected: `OK` + +**Step 4: Commit** + +```bash +git add commcare_connect/solicitations_new/ config/settings/base.py config/urls.py commcare_connect/labs/middleware.py +git commit -m "feat(solicitations_new): scaffold app, register in settings/urls/middleware" +``` + +--- + +## Task 2: Proxy Models + +**Files:** +- Create: `commcare_connect/solicitations_new/models.py` +- Create: `commcare_connect/solicitations_new/tests/__init__.py` +- Create: `commcare_connect/solicitations_new/tests/test_models.py` + +**Step 1: Write the failing tests** + +```python +# commcare_connect/solicitations_new/tests/__init__.py +# (empty) +``` + +```python +# commcare_connect/solicitations_new/tests/test_models.py +import pytest +from commcare_connect.solicitations_new.models import ( + SolicitationRecord, + ResponseRecord, + ReviewRecord, +) + + +class TestSolicitationRecord: + def 
_make(self, **overrides): + defaults = { + "id": 1, + "experiment": "test_program", + "type": "solicitation", + "data": { + "title": "Test Solicitation", + "description": "A test", + "scope_of_work": "Do the work", + "solicitation_type": "rfp", + "status": "active", + "is_public": True, + "questions": [{"id": "q1", "text": "Why?", "type": "text", "required": True}], + "application_deadline": "2026-06-01", + "expected_start_date": "2026-07-01", + "expected_end_date": "2026-12-31", + "estimated_scale": "1000 beneficiaries", + "contact_email": "test@example.com", + "created_by": "testuser", + "program_name": "Test Program", + }, + } + defaults["data"].update(overrides.pop("data", {})) + defaults.update(overrides) + return SolicitationRecord(**defaults) + + def test_title(self): + rec = self._make() + assert rec.title == "Test Solicitation" + + def test_solicitation_type(self): + rec = self._make() + assert rec.solicitation_type == "rfp" + + def test_is_public(self): + rec = self._make() + assert rec.is_public is True + + def test_application_deadline_parses(self): + rec = self._make() + from datetime import date + assert rec.application_deadline == date(2026, 6, 1) + + def test_application_deadline_none(self): + rec = self._make(data={"application_deadline": None}) + assert rec.application_deadline is None + + def test_questions(self): + rec = self._make() + assert len(rec.questions) == 1 + assert rec.questions[0]["id"] == "q1" + + def test_can_accept_responses(self): + rec = self._make(data={"status": "active"}) + assert rec.can_accept_responses() is True + rec2 = self._make(data={"status": "closed"}) + assert rec2.can_accept_responses() is False + + +class TestResponseRecord: + def _make(self, **overrides): + defaults = { + "id": 10, + "experiment": "llo_entity_123", + "type": "solicitation_response", + "data": { + "solicitation_id": 1, + "llo_entity_id": "llo_entity_123", + "llo_entity_name": "Test Org", + "responses": {"q1": "Because"}, + "status": "submitted", + 
"submitted_by_name": "Jane Doe", + "submitted_by_email": "jane@example.com", + "submission_date": "2026-05-15T10:00:00Z", + }, + } + defaults["data"].update(overrides.pop("data", {})) + defaults.update(overrides) + return ResponseRecord(**defaults) + + def test_solicitation_id(self): + rec = self._make() + assert rec.solicitation_id == 1 + + def test_llo_entity_name(self): + rec = self._make() + assert rec.llo_entity_name == "Test Org" + + def test_responses_dict(self): + rec = self._make() + assert rec.responses == {"q1": "Because"} + + def test_status(self): + rec = self._make() + assert rec.status == "submitted" + + +class TestReviewRecord: + def _make(self, **overrides): + defaults = { + "id": 20, + "experiment": "llo_entity_123", + "type": "solicitation_review", + "data": { + "response_id": 10, + "score": 85, + "recommendation": "approved", + "notes": "Looks good", + "tags": "experienced,local", + "reviewer_username": "reviewer1", + "review_date": "2026-05-20T14:00:00Z", + }, + } + defaults["data"].update(overrides.pop("data", {})) + defaults.update(overrides) + return ReviewRecord(**defaults) + + def test_score(self): + rec = self._make() + assert rec.score == 85 + + def test_recommendation(self): + rec = self._make() + assert rec.recommendation == "approved" + + def test_reviewer_username(self): + rec = self._make() + assert rec.reviewer_username == "reviewer1" +``` + +**Step 2: Run tests to verify they fail** + +Run: `pytest commcare_connect/solicitations_new/tests/test_models.py -v` +Expected: FAIL — `ImportError: cannot import name 'SolicitationRecord'` + +**Step 3: Implement the proxy models** + +```python +# commcare_connect/solicitations_new/models.py +from datetime import date, datetime + +from commcare_connect.labs.models import LocalLabsRecord + + +class SolicitationRecord(LocalLabsRecord): + """Proxy model for solicitation records. 
Scoped by program_id.""" + + @property + def title(self): + return self.data.get("title", "") + + @property + def description(self): + return self.data.get("description", "") + + @property + def scope_of_work(self): + return self.data.get("scope_of_work", "") + + @property + def solicitation_type(self): + return self.data.get("solicitation_type", "") + + @property + def status(self): + return self.data.get("status", "draft") + + @property + def is_public(self): + return self.data.get("is_public", False) + + @property + def questions(self): + return self.data.get("questions", []) + + @property + def application_deadline(self): + date_str = self.data.get("application_deadline") + if date_str: + try: + return datetime.strptime(date_str, "%Y-%m-%d").date() + except (ValueError, TypeError): + return None + return None + + @property + def expected_start_date(self): + date_str = self.data.get("expected_start_date") + if date_str: + try: + return datetime.strptime(date_str, "%Y-%m-%d").date() + except (ValueError, TypeError): + return None + return None + + @property + def expected_end_date(self): + date_str = self.data.get("expected_end_date") + if date_str: + try: + return datetime.strptime(date_str, "%Y-%m-%d").date() + except (ValueError, TypeError): + return None + return None + + @property + def estimated_scale(self): + return self.data.get("estimated_scale", "") + + @property + def contact_email(self): + return self.data.get("contact_email", "") + + @property + def created_by(self): + return self.data.get("created_by", "") + + @property + def program_name(self): + return self.data.get("program_name", "") + + def can_accept_responses(self): + return self.status == "active" + + +class ResponseRecord(LocalLabsRecord): + """Proxy model for response records. 
Scoped by llo_entity_id.""" + + @property + def solicitation_id(self): + return self.data.get("solicitation_id") + + @property + def llo_entity_id(self): + return self.data.get("llo_entity_id", "") + + @property + def llo_entity_name(self): + return self.data.get("llo_entity_name", "") + + @property + def responses(self): + return self.data.get("responses", {}) + + @property + def status(self): + return self.data.get("status", "draft") + + @property + def submitted_by_name(self): + return self.data.get("submitted_by_name", "") + + @property + def submitted_by_email(self): + return self.data.get("submitted_by_email", "") + + @property + def submission_date(self): + date_str = self.data.get("submission_date") + if date_str: + try: + return datetime.fromisoformat(date_str.replace("Z", "+00:00")) + except (ValueError, TypeError): + return None + return None + + +class ReviewRecord(LocalLabsRecord): + """Proxy model for review records.""" + + @property + def response_id(self): + return self.data.get("response_id") + + @property + def score(self): + return self.data.get("score") + + @property + def recommendation(self): + return self.data.get("recommendation", "") + + @property + def notes(self): + return self.data.get("notes", "") + + @property + def tags(self): + return self.data.get("tags", "") + + @property + def reviewer_username(self): + return self.data.get("reviewer_username", "") + + @property + def review_date(self): + date_str = self.data.get("review_date") + if date_str: + try: + return datetime.fromisoformat(date_str.replace("Z", "+00:00")) + except (ValueError, TypeError): + return None + return None +``` + +**Step 4: Run tests to verify they pass** + +Run: `pytest commcare_connect/solicitations_new/tests/test_models.py -v` +Expected: All PASS + +**Step 5: Commit** + +```bash +git add commcare_connect/solicitations_new/models.py commcare_connect/solicitations_new/tests/ +git commit -m "feat(solicitations_new): add proxy models for solicitation, response, 
review" +``` + +--- + +## Task 3: Data Access Layer + +**Files:** +- Create: `commcare_connect/solicitations_new/data_access.py` +- Create: `commcare_connect/solicitations_new/tests/test_data_access.py` + +**Reference:** Follow `commcare_connect/tasks/data_access.py` pattern for constructor, and `commcare_connect/solicitations/data_access.py` for CRUD methods. + +**Step 1: Write the failing tests** + +```python +# commcare_connect/solicitations_new/tests/test_data_access.py +from unittest.mock import MagicMock, patch + +import pytest + +from commcare_connect.solicitations_new.data_access import SolicitationsNewDataAccess +from commcare_connect.solicitations_new.models import ( + ResponseRecord, + ReviewRecord, + SolicitationRecord, +) + + +@pytest.fixture +def mock_request(): + req = MagicMock() + req.labs_context = {"program_id": "prog_1", "organization_id": "org_1"} + req.session = {"labs_oauth": {"access_token": "test_token", "expires_at": 9999999999}} + return req + + +@pytest.fixture +def mock_api_client(): + with patch( + "commcare_connect.solicitations_new.data_access.LabsRecordAPIClient" + ) as MockClient: + client = MockClient.return_value + yield client + + +class TestSolicitationCRUD: + def test_get_solicitations(self, mock_request, mock_api_client): + mock_api_client.get_records.return_value = [ + {"id": 1, "experiment": "prog_1", "type": "solicitation_new", + "data": {"title": "Test", "status": "active", "is_public": True}}, + ] + da = SolicitationsNewDataAccess(request=mock_request) + results = da.get_solicitations() + assert len(results) == 1 + assert isinstance(results[0], SolicitationRecord) + assert results[0].title == "Test" + + def test_get_solicitation_by_id(self, mock_request, mock_api_client): + mock_api_client.get_record_by_id.return_value = { + "id": 1, "experiment": "prog_1", "type": "solicitation_new", + "data": {"title": "Detail Test"}, + } + da = SolicitationsNewDataAccess(request=mock_request) + result = da.get_solicitation_by_id(1) + 
assert isinstance(result, SolicitationRecord) + assert result.title == "Detail Test" + + def test_get_public_solicitations(self, mock_request, mock_api_client): + mock_api_client.get_records.return_value = [ + {"id": 1, "experiment": "prog_1", "type": "solicitation_new", + "data": {"title": "Public", "status": "active", "is_public": True}}, + {"id": 2, "experiment": "prog_1", "type": "solicitation_new", + "data": {"title": "Private", "status": "active", "is_public": False}}, + ] + da = SolicitationsNewDataAccess(request=mock_request) + results = da.get_public_solicitations() + assert len(results) == 1 + assert results[0].title == "Public" + + def test_create_solicitation(self, mock_request, mock_api_client): + mock_api_client.create_record.return_value = { + "id": 5, "experiment": "prog_1", "type": "solicitation_new", + "data": {"title": "New One"}, + } + da = SolicitationsNewDataAccess(request=mock_request) + result = da.create_solicitation({"title": "New One"}) + assert isinstance(result, SolicitationRecord) + mock_api_client.create_record.assert_called_once() + + def test_update_solicitation(self, mock_request, mock_api_client): + mock_api_client.update_record.return_value = { + "id": 1, "experiment": "prog_1", "type": "solicitation_new", + "data": {"title": "Updated"}, + } + da = SolicitationsNewDataAccess(request=mock_request) + result = da.update_solicitation(1, {"title": "Updated"}) + assert result.title == "Updated" + + +class TestResponseCRUD: + def test_get_responses_for_solicitation(self, mock_request, mock_api_client): + mock_api_client.get_records.return_value = [ + {"id": 10, "experiment": "llo_1", "type": "solicitation_new_response", + "data": {"solicitation_id": 1, "status": "submitted"}}, + ] + da = SolicitationsNewDataAccess(request=mock_request) + results = da.get_responses_for_solicitation(1) + assert len(results) == 1 + assert isinstance(results[0], ResponseRecord) + + def test_create_response(self, mock_request, mock_api_client): + 
mock_api_client.create_record.return_value = { + "id": 11, "experiment": "llo_1", "type": "solicitation_new_response", + "data": {"solicitation_id": 1, "llo_entity_id": "llo_1"}, + } + da = SolicitationsNewDataAccess(request=mock_request) + result = da.create_response( + solicitation_id=1, + llo_entity_id="llo_1", + data={"responses": {"q1": "answer"}}, + ) + assert isinstance(result, ResponseRecord) + + +class TestReviewCRUD: + def test_create_review(self, mock_request, mock_api_client): + mock_api_client.create_record.return_value = { + "id": 20, "experiment": "llo_1", "type": "solicitation_new_review", + "data": {"response_id": 10, "score": 90}, + } + da = SolicitationsNewDataAccess(request=mock_request) + result = da.create_review(response_id=10, data={"score": 90}) + assert isinstance(result, ReviewRecord) + + def test_update_review(self, mock_request, mock_api_client): + mock_api_client.update_record.return_value = { + "id": 20, "experiment": "llo_1", "type": "solicitation_new_review", + "data": {"response_id": 10, "score": 95}, + } + da = SolicitationsNewDataAccess(request=mock_request) + result = da.update_review(20, {"score": 95}) + assert result.score == 95 +``` + +**Step 2: Run tests to verify they fail** + +Run: `pytest commcare_connect/solicitations_new/tests/test_data_access.py -v` +Expected: FAIL — `ImportError: cannot import name 'SolicitationsNewDataAccess'` + +**Step 3: Implement the data access layer** + +```python +# commcare_connect/solicitations_new/data_access.py +import logging + +from commcare_connect.labs.integrations.connect.api_client import LabsRecordAPIClient +from commcare_connect.solicitations_new.models import ( + ResponseRecord, + ReviewRecord, + SolicitationRecord, +) + +logger = logging.getLogger(__name__) + +# LabsRecord type constants +TYPE_SOLICITATION = "solicitation_new" +TYPE_RESPONSE = "solicitation_new_response" +TYPE_REVIEW = "solicitation_new_review" + + +class SolicitationsNewDataAccess: + """Data access layer for 
solicitations_new. All CRUD via LabsRecord API.""" + + def __init__(self, program_id=None, access_token=None, request=None): + self.program_id = program_id + self.access_token = access_token + + if request and hasattr(request, "labs_context"): + labs_context = request.labs_context + if not self.program_id and "program_id" in labs_context: + self.program_id = labs_context["program_id"] + + if not self.access_token and request: + labs_oauth = getattr(request, "session", {}).get("labs_oauth", {}) + self.access_token = labs_oauth.get("access_token") + + self.labs_api = LabsRecordAPIClient( + access_token=self.access_token, + ) + + # ── Solicitation CRUD ────────────────────────────────────── + + def get_solicitations(self, status=None, solicitation_type=None): + """Get solicitations for current program.""" + records = self.labs_api.get_records( + experiment=self.program_id, + record_type=TYPE_SOLICITATION, + ) + results = [SolicitationRecord(**r) for r in records] + if status: + results = [r for r in results if r.status == status] + if solicitation_type: + results = [r for r in results if r.solicitation_type == solicitation_type] + return results + + def get_public_solicitations(self, solicitation_type=None): + """Get all public, active solicitations across all programs.""" + records = self.labs_api.get_records( + record_type=TYPE_SOLICITATION, + ) + results = [SolicitationRecord(**r) for r in records] + results = [r for r in results if r.is_public and r.status == "active"] + if solicitation_type: + results = [r for r in results if r.solicitation_type == solicitation_type] + return results + + def get_solicitation_by_id(self, solicitation_id): + """Get a single solicitation by ID.""" + record = self.labs_api.get_record_by_id(solicitation_id) + if record: + return SolicitationRecord(**record) + return None + + def create_solicitation(self, data): + """Create a new solicitation under the current program.""" + record = self.labs_api.create_record( + 
experiment=self.program_id, + record_type=TYPE_SOLICITATION, + data=data, + ) + return SolicitationRecord(**record) + + def update_solicitation(self, solicitation_id, data): + """Update an existing solicitation.""" + record = self.labs_api.update_record( + record_id=solicitation_id, + data=data, + ) + return SolicitationRecord(**record) + + # ── Response CRUD ────────────────────────────────────────── + + def get_responses_for_solicitation(self, solicitation_id): + """Get all responses for a solicitation.""" + records = self.labs_api.get_records( + record_type=TYPE_RESPONSE, + ) + results = [ResponseRecord(**r) for r in records] + return [r for r in results if r.solicitation_id == solicitation_id] + + def get_response_by_id(self, response_id): + """Get a single response by ID.""" + record = self.labs_api.get_record_by_id(response_id) + if record: + return ResponseRecord(**record) + return None + + def create_response(self, solicitation_id, llo_entity_id, data): + """Create a new response.""" + data["solicitation_id"] = solicitation_id + data["llo_entity_id"] = llo_entity_id + record = self.labs_api.create_record( + experiment=llo_entity_id, + record_type=TYPE_RESPONSE, + data=data, + ) + return ResponseRecord(**record) + + def update_response(self, response_id, data): + """Update an existing response.""" + record = self.labs_api.update_record( + record_id=response_id, + data=data, + ) + return ResponseRecord(**record) + + # ── Review CRUD ──────────────────────────────────────────── + + def get_reviews_for_response(self, response_id): + """Get all reviews for a response.""" + records = self.labs_api.get_records( + record_type=TYPE_REVIEW, + ) + results = [ReviewRecord(**r) for r in records] + return [r for r in results if r.response_id == response_id] + + def get_review_by_id(self, review_id): + """Get a single review by ID.""" + record = self.labs_api.get_record_by_id(review_id) + if record: + return ReviewRecord(**record) + return None + + def create_review(self, 
response_id, data): + """Create a new review for a response.""" + data["response_id"] = response_id + # Get the response to find the llo_entity_id for scoping + response = self.get_response_by_id(response_id) + experiment = response.llo_entity_id if response else "" + record = self.labs_api.create_record( + experiment=experiment, + record_type=TYPE_REVIEW, + data=data, + ) + return ReviewRecord(**record) + + def update_review(self, review_id, data): + """Update an existing review.""" + record = self.labs_api.update_record( + record_id=review_id, + data=data, + ) + return ReviewRecord(**record) +``` + +**Step 4: Run tests to verify they pass** + +Run: `pytest commcare_connect/solicitations_new/tests/test_data_access.py -v` +Expected: All PASS + +**Step 5: Commit** + +```bash +git add commcare_connect/solicitations_new/data_access.py commcare_connect/solicitations_new/tests/test_data_access.py +git commit -m "feat(solicitations_new): add data access layer with solicitation/response/review CRUD" +``` + +--- + +## Task 4: Forms + +**Files:** +- Create: `commcare_connect/solicitations_new/forms.py` +- Create: `commcare_connect/solicitations_new/tests/test_forms.py` + +**Step 1: Write the failing tests** + +```python +# commcare_connect/solicitations_new/tests/test_forms.py +import pytest + +from commcare_connect.solicitations_new.forms import ( + SolicitationForm, + SolicitationResponseForm, + ReviewForm, +) + + +class TestSolicitationForm: + def test_valid_minimal(self): + form = SolicitationForm(data={ + "title": "Test RFP", + "description": "A description", + "solicitation_type": "rfp", + "status": "draft", + "is_public": True, + "contact_email": "test@example.com", + }) + assert form.is_valid(), form.errors + + def test_missing_title(self): + form = SolicitationForm(data={ + "description": "A description", + "solicitation_type": "rfp", + "status": "draft", + }) + assert not form.is_valid() + assert "title" in form.errors + + +class TestSolicitationResponseForm: + 
def _questions(self): + return [ + {"id": "q1", "text": "Why apply?", "type": "textarea", "required": True}, + {"id": "q2", "text": "Team size?", "type": "number", "required": False}, + ] + + def test_valid_with_required_question(self): + form = SolicitationResponseForm( + questions=self._questions(), + data={"question_q1": "We are qualified"}, + ) + assert form.is_valid(), form.errors + + def test_missing_required_question(self): + form = SolicitationResponseForm( + questions=self._questions(), + data={}, + ) + assert not form.is_valid() + assert "question_q1" in form.errors + + +class TestReviewForm: + def test_valid(self): + form = ReviewForm(data={ + "score": 85, + "recommendation": "approved", + "notes": "Good application", + }) + assert form.is_valid(), form.errors + + def test_score_out_of_range(self): + form = ReviewForm(data={ + "score": 150, + "recommendation": "approved", + }) + assert not form.is_valid() +``` + +**Step 2: Run tests to verify they fail** + +Run: `pytest commcare_connect/solicitations_new/tests/test_forms.py -v` +Expected: FAIL — `ImportError` + +**Step 3: Implement the forms** + +```python +# commcare_connect/solicitations_new/forms.py +from django import forms + + +class SolicitationForm(forms.Form): + """Form for creating/editing a solicitation.""" + + title = forms.CharField(max_length=255) + description = forms.CharField(widget=forms.Textarea(attrs={"rows": 4})) + scope_of_work = forms.CharField(widget=forms.Textarea(attrs={"rows": 6}), required=False) + solicitation_type = forms.ChoiceField(choices=[("eoi", "Expression of Interest"), ("rfp", "Request for Proposals")]) + status = forms.ChoiceField(choices=[("draft", "Draft"), ("active", "Active"), ("closed", "Closed")]) + is_public = forms.BooleanField(required=False, initial=True) + application_deadline = forms.DateField(required=False, widget=forms.DateInput(attrs={"type": "date"})) + expected_start_date = forms.DateField(required=False, widget=forms.DateInput(attrs={"type": 
"date"})) + expected_end_date = forms.DateField(required=False, widget=forms.DateInput(attrs={"type": "date"})) + estimated_scale = forms.CharField(max_length=255, required=False) + contact_email = forms.EmailField(required=False) + # questions handled via Alpine.js, submitted as hidden JSON field + questions_json = forms.CharField(widget=forms.HiddenInput(), required=False) + + def to_data_dict(self): + """Convert cleaned form data to dict for data_access layer.""" + d = self.cleaned_data.copy() + import json + d["questions"] = json.loads(d.pop("questions_json", "[]") or "[]") + # Convert dates to strings + for key in ("application_deadline", "expected_start_date", "expected_end_date"): + val = d.get(key) + d[key] = val.isoformat() if val else None + return d + + +class SolicitationResponseForm(forms.Form): + """Dynamic form built from solicitation questions.""" + + def __init__(self, questions=None, *args, **kwargs): + super().__init__(*args, **kwargs) + for q in (questions or []): + q_id = q["id"] + field_name = f"question_{q_id}" + q_type = q.get("type", "text") + required = q.get("required", False) + label = q.get("text", "") + + if q_type == "textarea": + self.fields[field_name] = forms.CharField( + label=label, required=required, + widget=forms.Textarea(attrs={"rows": 3}), + ) + elif q_type == "number": + self.fields[field_name] = forms.IntegerField( + label=label, required=required, + ) + elif q_type == "multiple_choice": + choices = [(o, o) for o in q.get("options", [])] + self.fields[field_name] = forms.ChoiceField( + label=label, required=required, choices=choices, + ) + else: + self.fields[field_name] = forms.CharField( + label=label, required=required, + ) + + def get_responses_dict(self): + """Return {question_id: answer} dict.""" + result = {} + for key, val in self.cleaned_data.items(): + if key.startswith("question_"): + q_id = key[len("question_"):] + result[q_id] = val + return result + + +class ReviewForm(forms.Form): + """Form for reviewing a 
response.""" + + score = forms.IntegerField(min_value=1, max_value=100) + recommendation = forms.ChoiceField(choices=[ + ("under_review", "Under Review"), + ("approved", "Approved"), + ("rejected", "Rejected"), + ("needs_revision", "Needs Revision"), + ]) + notes = forms.CharField(widget=forms.Textarea(attrs={"rows": 4}), required=False) + tags = forms.CharField(max_length=255, required=False) +``` + +**Step 4: Run tests to verify they pass** + +Run: `pytest commcare_connect/solicitations_new/tests/test_forms.py -v` +Expected: All PASS + +**Step 5: Commit** + +```bash +git add commcare_connect/solicitations_new/forms.py commcare_connect/solicitations_new/tests/test_forms.py +git commit -m "feat(solicitations_new): add solicitation, response, and review forms" +``` + +--- + +## Task 5: Public Views (No Login Required) + +**Files:** +- Modify: `commcare_connect/solicitations_new/views.py` +- Modify: `commcare_connect/solicitations_new/urls.py` +- Create: `commcare_connect/templates/solicitations_new/public_list.html` +- Create: `commcare_connect/templates/solicitations_new/public_detail.html` + +**Step 1: Write the public views** + +Replace `commcare_connect/solicitations_new/views.py`: + +```python +# commcare_connect/solicitations_new/views.py +import json +import logging + +from django.conf import settings +from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin +from django.http import Http404, JsonResponse +from django.shortcuts import get_object_or_404, redirect, render +from django.utils import timezone +from django.views import View +from django.views.generic import TemplateView + +from commcare_connect.solicitations_new.data_access import SolicitationsNewDataAccess +from commcare_connect.solicitations_new.forms import ( + ReviewForm, + SolicitationForm, + SolicitationResponseForm, +) + +logger = logging.getLogger(__name__) + + +# ── Permission Mixins ────────────────────────────────────────── + +class 
LabsLoginRequiredMixin(LoginRequiredMixin): + """Redirect to labs login.""" + login_url = "/labs/login/" + + +class ManagerRequiredMixin(LabsLoginRequiredMixin, UserPassesTestMixin): + """Require authenticated labs user (manager access).""" + def test_func(self): + return getattr(self.request.user, "is_labs_user", False) + + +# ── Helpers ──────────────────────────────────────────────────── + +def _get_data_access(request): + """Create data access from request. Works for public (no token) and authed.""" + return SolicitationsNewDataAccess(request=request) + + +def _get_public_data_access(): + """Create data access for public endpoints (no auth token).""" + return SolicitationsNewDataAccess() + + +# ── Public Views (no login) ─────────────────────────────────── + +class PublicSolicitationListView(TemplateView): + template_name = "solicitations_new/public_list.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + solicitation_type = self.request.GET.get("type") + try: + da = _get_data_access(self.request) + ctx["solicitations"] = da.get_public_solicitations( + solicitation_type=solicitation_type, + ) + except Exception: + logger.exception("Failed to load public solicitations") + ctx["solicitations"] = [] + ctx["selected_type"] = solicitation_type or "" + return ctx + + +class PublicSolicitationDetailView(TemplateView): + template_name = "solicitations_new/public_detail.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + pk = kwargs["pk"] + try: + da = _get_data_access(self.request) + solicitation = da.get_solicitation_by_id(pk) + if not solicitation or not solicitation.is_public: + raise Http404("Solicitation not found") + ctx["solicitation"] = solicitation + except Http404: + raise + except Exception: + logger.exception("Failed to load solicitation %s", pk) + raise Http404("Solicitation not found") + return ctx + + +# ── Manager Views ───────────────────────────────────────────── +# 
(Implemented in Task 6) + + +# ── Response Views ──────────────────────────────────────────── +# (Implemented in Task 7) + + +# ── Review Views ────────────────────────────────────────────── +# (Implemented in Task 8) +``` + +**Step 2: Update URLs** + +```python +# commcare_connect/solicitations_new/urls.py +from django.urls import path + +from . import views + +app_name = "solicitations_new" + +urlpatterns = [ + # Public (no login required) + path("", views.PublicSolicitationListView.as_view(), name="public_list"), + path("<int:pk>/", views.PublicSolicitationDetailView.as_view(), name="public_detail"), +] +``` + +**Step 3: Create the public list template** + +```html +{# commcare_connect/templates/solicitations_new/public_list.html #} +{% extends "base.html" %} + +{% block content %} +
+
+

Open Solicitations

+

Browse active requests for proposals and expressions of interest.

+
+ + {# Type filter #} + + + {# Solicitation cards #} + {% if solicitations %} + + {% else %} +
+ +

No open solicitations at this time.

+
+ {% endif %} +
+{% endblock %} +``` + +**Step 4: Create the public detail template** + +```html +{# commcare_connect/templates/solicitations_new/public_detail.html #} +{% extends "base.html" %} + +{% block content %} +
+ {# Breadcrumb #} + + + {# Header #} +
+
+ + {{ solicitation.solicitation_type|upper }} + + {% if solicitation.program_name %} + {{ solicitation.program_name }} + {% endif %} +
+

{{ solicitation.title }}

+

{{ solicitation.description }}

+
+ + {# Details grid #} +
+ {% if solicitation.scope_of_work %} +
+

Scope of Work

+

{{ solicitation.scope_of_work }}

+
+ {% endif %} + +
+

Key Dates

+
+ {% if solicitation.application_deadline %} +
+
Application Deadline
+
{{ solicitation.application_deadline|date:"M d, Y" }}
+
+ {% endif %} + {% if solicitation.expected_start_date %} +
+
Expected Start
+
{{ solicitation.expected_start_date|date:"M d, Y" }}
+
+ {% endif %} + {% if solicitation.expected_end_date %} +
+
Expected End
+
{{ solicitation.expected_end_date|date:"M d, Y" }}
+
+ {% endif %} +
+
+ +
+

Details

+
+ {% if solicitation.estimated_scale %} +
+
Estimated Scale
+
{{ solicitation.estimated_scale }}
+
+ {% endif %} + {% if solicitation.contact_email %} +
+
Contact
+
{{ solicitation.contact_email }}
+
+ {% endif %} +
+
+
+ + {# Questions preview #} + {% if solicitation.questions %} +
+

Response Questions

+
    + {% for q in solicitation.questions %} +
  1. + {{ q.text }} + {% if q.required %}(Required){% endif %} +
  2. + {% endfor %} +
+
+ {% endif %} + + {# CTA #} + {% if solicitation.can_accept_responses %} + + {% else %} +
+ This solicitation is no longer accepting responses. +
+ {% endif %} +
+{% endblock %} +``` + +**Step 5: Verify templates load (manual check)** + +Run: `python manage.py shell -c "from django.template.loader import get_template; get_template('solicitations_new/public_list.html'); print('OK')"` +Expected: `OK` + +**Step 6: Commit** + +```bash +git add commcare_connect/solicitations_new/views.py commcare_connect/solicitations_new/urls.py commcare_connect/templates/solicitations_new/ +git commit -m "feat(solicitations_new): add public list and detail views with templates" +``` + +--- + +## Task 6: Manager Views & Templates + +**Files:** +- Modify: `commcare_connect/solicitations_new/views.py` (add manager views) +- Modify: `commcare_connect/solicitations_new/urls.py` (add manager URLs) +- Create: `commcare_connect/templates/solicitations_new/manage_list.html` +- Create: `commcare_connect/templates/solicitations_new/solicitation_form.html` +- Create: `commcare_connect/templates/solicitations_new/responses_list.html` + +**Step 1: Add manager views to views.py** + +Append after the public views section: + +```python +# ── Manager Views ───────────────────────────────────────────── + +class ManageSolicitationsView(ManagerRequiredMixin, TemplateView): + template_name = "solicitations_new/manage_list.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + try: + da = _get_data_access(self.request) + solicitations = da.get_solicitations() + # Add response count to each solicitation + for s in solicitations: + responses = da.get_responses_for_solicitation(s.pk) + s._response_count = len(responses) + ctx["solicitations"] = solicitations + except Exception: + logger.exception("Failed to load managed solicitations") + ctx["solicitations"] = [] + return ctx + + +class SolicitationCreateView(ManagerRequiredMixin, TemplateView): + template_name = "solicitations_new/solicitation_form.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + ctx["form"] = SolicitationForm() + 
ctx["is_create"] = True + return ctx + + def post(self, request, *args, **kwargs): + form = SolicitationForm(request.POST) + if form.is_valid(): + data = form.to_data_dict() + data["created_by"] = getattr(request.user, "username", "") + program_name = request.labs_context.get("program_name", "") if hasattr(request, "labs_context") else "" + data["program_name"] = program_name + try: + da = _get_data_access(request) + da.create_solicitation(data) + return redirect("solicitations_new:manage_list") + except Exception: + logger.exception("Failed to create solicitation") + form.add_error(None, "Failed to create solicitation. Please try again.") + return render(request, self.template_name, {"form": form, "is_create": True}) + + +class SolicitationEditView(ManagerRequiredMixin, TemplateView): + template_name = "solicitations_new/solicitation_form.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + pk = kwargs["pk"] + da = _get_data_access(self.request) + solicitation = da.get_solicitation_by_id(pk) + if not solicitation: + raise Http404 + import json + initial = { + "title": solicitation.title, + "description": solicitation.description, + "scope_of_work": solicitation.scope_of_work, + "solicitation_type": solicitation.solicitation_type, + "status": solicitation.status, + "is_public": solicitation.is_public, + "application_deadline": solicitation.application_deadline, + "expected_start_date": solicitation.expected_start_date, + "expected_end_date": solicitation.expected_end_date, + "estimated_scale": solicitation.estimated_scale, + "contact_email": solicitation.contact_email, + "questions_json": json.dumps(solicitation.questions), + } + ctx["form"] = SolicitationForm(initial=initial) + ctx["solicitation"] = solicitation + ctx["is_create"] = False + ctx["existing_questions_json"] = json.dumps(solicitation.questions) + return ctx + + def post(self, request, *args, **kwargs): + pk = kwargs["pk"] + form = SolicitationForm(request.POST) 
+ if form.is_valid(): + data = form.to_data_dict() + try: + da = _get_data_access(request) + da.update_solicitation(pk, data) + return redirect("solicitations_new:manage_list") + except Exception: + logger.exception("Failed to update solicitation %s", pk) + form.add_error(None, "Failed to update solicitation.") + return render(request, self.template_name, {"form": form, "is_create": False}) + + +class ResponsesListView(ManagerRequiredMixin, TemplateView): + template_name = "solicitations_new/responses_list.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + pk = kwargs["pk"] + try: + da = _get_data_access(self.request) + solicitation = da.get_solicitation_by_id(pk) + if not solicitation: + raise Http404 + responses = da.get_responses_for_solicitation(pk) + # Attach reviews to each response + for resp in responses: + resp._reviews = da.get_reviews_for_response(resp.pk) + resp._latest_review = resp._reviews[-1] if resp._reviews else None + ctx["solicitation"] = solicitation + ctx["responses"] = responses + except Http404: + raise + except Exception: + logger.exception("Failed to load responses for solicitation %s", pk) + ctx["solicitation"] = None + ctx["responses"] = [] + return ctx +``` + +**Step 2: Add manager URLs** + +```python +# Add to urlpatterns in urls.py: + # Manager (login required) + path("manage/", views.ManageSolicitationsView.as_view(), name="manage_list"), + path("create/", views.SolicitationCreateView.as_view(), name="create"), + path("<int:pk>/edit/", views.SolicitationEditView.as_view(), name="edit"), + path("<int:pk>/responses/", views.ResponsesListView.as_view(), name="responses_list"), +``` + +**Step 3: Create manager templates** + +Create `commcare_connect/templates/solicitations_new/manage_list.html`: +```html +{% extends "base.html" %} + +{% block content %} +
+
+

Manage Solicitations

+ + Create Solicitation + +
+ + {% if solicitations %} +
+ + + + + + + + + + + + + {% for s in solicitations %} + + + + + + + + + {% endfor %} + +
TitleTypeStatusDeadlineResponsesActions
{{ s.title }} + + {{ s.solicitation_type|upper }} + + + + {{ s.status|title }} + + + {{ s.application_deadline|date:"M d, Y"|default:"—" }} + {{ s._response_count }} + Edit + Responses +
+
+ {% else %} +
+ +

No solicitations yet.

+ Create your first solicitation +
+ {% endif %} +
+{% endblock %} +``` + +Create `commcare_connect/templates/solicitations_new/solicitation_form.html`: +```html +{% extends "base.html" %} +{% load crispy_forms_tags %} + +{% block content %} +
+ + +

+ {% if is_create %}Create Solicitation{% else %}Edit Solicitation{% endif %} +

+ + {% if form.non_field_errors %} +
+ {% for error in form.non_field_errors %} +

{{ error }}

+ {% endfor %} +
+ {% endif %} + +
+ {% csrf_token %} + +
+

Basic Information

+ {{ form.title|as_crispy_field }} + {{ form.description|as_crispy_field }} + {{ form.scope_of_work|as_crispy_field }} +
+ {{ form.solicitation_type|as_crispy_field }} + {{ form.status|as_crispy_field }} +
+
{{ form.is_public|as_crispy_field }}
+
+ +
+

Dates & Details

+
+ {{ form.application_deadline|as_crispy_field }} + {{ form.expected_start_date|as_crispy_field }} + {{ form.expected_end_date|as_crispy_field }} +
+ {{ form.estimated_scale|as_crispy_field }} + {{ form.contact_email|as_crispy_field }} +
+ + {# Dynamic question builder (Alpine.js) #} +
+
+

Questions

+ +
+ + + +

+ No questions added yet. Click "Add Question" to get started. +

+ + {{ form.questions_json }} +
+ +
+ Cancel + +
+
+
+ + +{% endblock %} +``` + +Create `commcare_connect/templates/solicitations_new/responses_list.html`: +```html +{% extends "base.html" %} + +{% block content %} +
+ + +

Responses to: {{ solicitation.title }}

+ + {% if responses %} +
+ + + + + + + + + + + + + + {% for r in responses %} + + + + + + + + + + {% endfor %} + +
OrganizationSubmitted ByStatusDateRecommendationScoreActions
{{ r.llo_entity_name }}{{ r.submitted_by_name }} + + {{ r.status|title }} + + {{ r.submission_date|date:"M d, Y"|default:"—" }} + {% if r._latest_review %}{{ r._latest_review.recommendation|title }}{% else %}—{% endif %} + + {% if r._latest_review %}{{ r._latest_review.score }}{% else %}—{% endif %} + + View + Review +
+
+ {% else %} +
+

No responses yet.

+
+ {% endif %} +
+{% endblock %} +``` + +**Step 4: Commit** + +```bash +git add commcare_connect/solicitations_new/views.py commcare_connect/solicitations_new/urls.py commcare_connect/templates/solicitations_new/ +git commit -m "feat(solicitations_new): add manager views — manage list, create, edit, responses list" +``` + +--- + +## Task 7: Response Views & Templates + +**Files:** +- Modify: `commcare_connect/solicitations_new/views.py` +- Modify: `commcare_connect/solicitations_new/urls.py` +- Create: `commcare_connect/templates/solicitations_new/respond.html` +- Create: `commcare_connect/templates/solicitations_new/response_detail.html` + +**Step 1: Add response views to views.py** + +Append after manager views: + +```python +# ── Response Views ──────────────────────────────────────────── + +class RespondView(LabsLoginRequiredMixin, TemplateView): + template_name = "solicitations_new/respond.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + pk = kwargs["pk"] + da = _get_data_access(self.request) + solicitation = da.get_solicitation_by_id(pk) + if not solicitation or not solicitation.can_accept_responses(): + raise Http404 + ctx["solicitation"] = solicitation + ctx["form"] = SolicitationResponseForm(questions=solicitation.questions) + # Get user's LLO entities from session + user = self.request.user + ctx["llo_entities"] = getattr(user, "organizations", []) if hasattr(user, "organizations") else [] + return ctx + + def post(self, request, *args, **kwargs): + pk = kwargs["pk"] + da = _get_data_access(request) + solicitation = da.get_solicitation_by_id(pk) + if not solicitation or not solicitation.can_accept_responses(): + raise Http404 + + form = SolicitationResponseForm(questions=solicitation.questions, data=request.POST) + llo_entity_id = request.POST.get("llo_entity_id", "") + llo_entity_name = request.POST.get("llo_entity_name", "") + create_new = request.POST.get("create_new_entity") == "on" + + if create_new: + new_name = 
request.POST.get("new_entity_name", "").strip() + new_short = request.POST.get("new_entity_short_name", "").strip() + if not new_name: + form.add_error(None, "New entity name is required.") + return render(request, self.template_name, { + "solicitation": solicitation, "form": form, "llo_entities": [], + }) + llo_entity_id = f"new_{new_short or new_name}".lower().replace(" ", "_") + llo_entity_name = new_name + + if form.is_valid() and llo_entity_id: + is_draft = request.POST.get("action") == "save_draft" + data = { + "responses": form.get_responses_dict(), + "llo_entity_name": llo_entity_name, + "status": "draft" if is_draft else "submitted", + "submitted_by_name": getattr(request.user, "name", ""), + "submitted_by_email": getattr(request.user, "email", ""), + } + if not is_draft: + data["submission_date"] = timezone.now().isoformat() + try: + da.create_response( + solicitation_id=pk, + llo_entity_id=llo_entity_id, + data=data, + ) + return redirect("solicitations_new:public_detail", pk=pk) + except Exception: + logger.exception("Failed to create response") + form.add_error(None, "Failed to submit response.") + + return render(request, self.template_name, { + "solicitation": solicitation, "form": form, "llo_entities": [], + }) + + +class ResponseDetailView(LabsLoginRequiredMixin, TemplateView): + template_name = "solicitations_new/response_detail.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + pk = kwargs["pk"] + da = _get_data_access(self.request) + response = da.get_response_by_id(pk) + if not response: + raise Http404 + solicitation = da.get_solicitation_by_id(response.solicitation_id) + reviews = da.get_reviews_for_response(pk) + ctx["response"] = response + ctx["solicitation"] = solicitation + ctx["reviews"] = reviews + # Build Q&A pairs + qa_pairs = [] + if solicitation: + q_map = {q["id"]: q for q in solicitation.questions} + for q_id, answer in response.responses.items(): + q = q_map.get(q_id, {"text": q_id}) + 
qa_pairs.append({"question": q.get("text", q_id), "answer": answer}) + ctx["qa_pairs"] = qa_pairs + return ctx +``` + +**Step 2: Add response URLs** + +```python +# Add to urlpatterns: + path("<int:pk>/respond/", views.RespondView.as_view(), name="respond"), + path("response/<int:pk>/", views.ResponseDetailView.as_view(), name="response_detail"), +``` + +**Step 3: Create the respond template** + +Create `commcare_connect/templates/solicitations_new/respond.html`: +```html +{% extends "base.html" %} +{% load crispy_forms_tags %} + +{% block content %} +
+ + +

Respond to: {{ solicitation.title }}

+

{{ solicitation.solicitation_type|upper }} — Deadline: {{ solicitation.application_deadline|date:"M d, Y"|default:"None" }}

+ + {% if form.non_field_errors %} +
+ {% for error in form.non_field_errors %} +

{{ error }}

+ {% endfor %} +
+ {% endif %} + +
+ {% csrf_token %} + + {# LLO Entity selection #} +
+

Responding Organization

+ + {% if llo_entities %} +
+ + + +
+ {% endif %} + +
+ +
+ +
+
+ + +
+
+ + +
+
+
+ + {# Questions #} +
+

Questions

+ {% for field in form %} + {{ field|as_crispy_field }} + {% endfor %} +
+ + {# Actions #} +
+ + +
+
+
+ + +{% endblock %} +``` + +Create `commcare_connect/templates/solicitations_new/response_detail.html`: +```html +{% extends "base.html" %} + +{% block content %} +
+ + +
+
+

{{ response.llo_entity_name }}

+ + {{ response.status|title }} + +
+
+
+
Submitted By
+
{{ response.submitted_by_name }} ({{ response.submitted_by_email }})
+
+
+
Submission Date
+
{{ response.submission_date|date:"M d, Y H:i"|default:"—" }}
+
+
+
+ + {# Q&A pairs #} +
+

Responses

+ {% for qa in qa_pairs %} +
+

{{ qa.question }}

+

{{ qa.answer }}

+
+ {% endfor %} +
+ + {# Reviews #} + {% if reviews %} +
+

Reviews

+ {% for rev in reviews %} +
+
+ {{ rev.reviewer_username }} + Score: {{ rev.score }}/100 +
+ {{ rev.recommendation|title }} + {% if rev.notes %}

{{ rev.notes }}

{% endif %} +
+ {% endfor %} +
+ {% endif %} + + {# Review action #} + +
+{% endblock %} +``` + +**Step 4: Commit** + +```bash +git add commcare_connect/solicitations_new/views.py commcare_connect/solicitations_new/urls.py commcare_connect/templates/solicitations_new/ +git commit -m "feat(solicitations_new): add respond and response detail views with templates" +``` + +--- + +## Task 8: Review View & Template + +**Files:** +- Modify: `commcare_connect/solicitations_new/views.py` +- Modify: `commcare_connect/solicitations_new/urls.py` +- Create: `commcare_connect/templates/solicitations_new/review_form.html` + +**Step 1: Add review view to views.py** + +Append after response views: + +```python +# ── Review Views ────────────────────────────────────────────── + +class ReviewView(ManagerRequiredMixin, TemplateView): + template_name = "solicitations_new/review_form.html" + + def get_context_data(self, **kwargs): + ctx = super().get_context_data(**kwargs) + pk = kwargs["pk"] # response_id + da = _get_data_access(self.request) + response = da.get_response_by_id(pk) + if not response: + raise Http404 + solicitation = da.get_solicitation_by_id(response.solicitation_id) + # Build Q&A pairs + qa_pairs = [] + if solicitation: + q_map = {q["id"]: q for q in solicitation.questions} + for q_id, answer in response.responses.items(): + q = q_map.get(q_id, {"text": q_id}) + qa_pairs.append({"question": q.get("text", q_id), "answer": answer}) + # Check for existing review by this user + reviews = da.get_reviews_for_response(pk) + username = getattr(self.request.user, "username", "") + existing = next((r for r in reviews if r.reviewer_username == username), None) + initial = {} + if existing: + initial = { + "score": existing.score, + "recommendation": existing.recommendation, + "notes": existing.notes, + "tags": existing.tags, + } + ctx["form"] = ReviewForm(initial=initial) + ctx["response"] = response + ctx["solicitation"] = solicitation + ctx["qa_pairs"] = qa_pairs + ctx["existing_review"] = existing + return ctx + + def post(self, request, *args, 
**kwargs): + pk = kwargs["pk"] # response_id + form = ReviewForm(request.POST) + if form.is_valid(): + da = _get_data_access(request) + data = form.cleaned_data.copy() + data["reviewer_username"] = getattr(request.user, "username", "") + data["review_date"] = timezone.now().isoformat() + # Check for existing review to update + reviews = da.get_reviews_for_response(pk) + username = data["reviewer_username"] + existing = next((r for r in reviews if r.reviewer_username == username), None) + try: + if existing: + da.update_review(existing.pk, data) + else: + da.create_review(response_id=pk, data=data) + return redirect("solicitations_new:response_detail", pk=pk) + except Exception: + logger.exception("Failed to save review") + form.add_error(None, "Failed to save review.") + + da = _get_data_access(request) + response = da.get_response_by_id(pk) + solicitation = da.get_solicitation_by_id(response.solicitation_id) if response else None + return render(request, self.template_name, { + "form": form, "response": response, "solicitation": solicitation, + "qa_pairs": [], "existing_review": None, + }) +``` + +**Step 2: Add review URL** + +```python +# Add to urlpatterns: + path("response/<int:pk>/review/", views.ReviewView.as_view(), name="review"), +``` + +**Step 3: Create the review template** + +Create `commcare_connect/templates/solicitations_new/review_form.html`: +```html +{% extends "base.html" %} +{% load crispy_forms_tags %} + +{% block content %} +
+ + +

+ Review: {{ response.llo_entity_name }} +

+ + {# Q&A summary #} +
+

Response Summary

+ {% for qa in qa_pairs %} +
+

{{ qa.question }}

+

{{ qa.answer }}

+
+ {% endfor %} +
+ + {# Review form #} +
+ {% csrf_token %} + + {% if form.non_field_errors %} +
+ {% for error in form.non_field_errors %} +

{{ error }}

+ {% endfor %} +
+ {% endif %} + + {{ form.score|as_crispy_field }} + {{ form.recommendation|as_crispy_field }} + {{ form.notes|as_crispy_field }} + {{ form.tags|as_crispy_field }} + +
+ Cancel + +
+
+
+{% endblock %} +``` + +**Step 4: Commit** + +```bash +git add commcare_connect/solicitations_new/views.py commcare_connect/solicitations_new/urls.py commcare_connect/templates/solicitations_new/review_form.html +git commit -m "feat(solicitations_new): add review view and template" +``` + +--- + +## Task 9: JSON API Views + +**Files:** +- Create: `commcare_connect/solicitations_new/api_views.py` +- Create: `commcare_connect/solicitations_new/tests/test_api_views.py` +- Modify: `commcare_connect/solicitations_new/urls.py` + +**Step 1: Write the failing tests** + +```python +# commcare_connect/solicitations_new/tests/test_api_views.py +import json +from unittest.mock import MagicMock, patch + +import pytest +from django.test import RequestFactory + +from commcare_connect.solicitations_new.api_views import ( + api_solicitations_list, + api_solicitation_detail, +) + + +@pytest.fixture +def rf(): + return RequestFactory() + + +@pytest.fixture +def mock_da(): + with patch("commcare_connect.solicitations_new.api_views._get_data_access") as mock: + da = MagicMock() + mock.return_value = da + yield da + + +class TestAPISolicitationsList: + def test_get_returns_json(self, rf, mock_da): + mock_da.get_public_solicitations.return_value = [] + request = rf.get("/solicitations_new/api/solicitations/") + response = api_solicitations_list(request) + assert response.status_code == 200 + data = json.loads(response.content) + assert "solicitations" in data +``` + +**Step 2: Run tests to verify they fail** + +Run: `pytest commcare_connect/solicitations_new/tests/test_api_views.py -v` +Expected: FAIL — `ImportError` + +**Step 3: Implement the API views** + +```python +# commcare_connect/solicitations_new/api_views.py +import json +import logging + +from django.http import JsonResponse +from django.views.decorators.csrf import csrf_exempt +from django.views.decorators.http import require_http_methods + +from commcare_connect.solicitations_new.data_access import SolicitationsNewDataAccess 
+ +logger = logging.getLogger(__name__) + + +def _get_data_access(request): + return SolicitationsNewDataAccess(request=request) + + +def _serialize_solicitation(s): + return { + "id": s.pk, + "title": s.title, + "description": s.description, + "scope_of_work": s.scope_of_work, + "solicitation_type": s.solicitation_type, + "status": s.status, + "is_public": s.is_public, + "questions": s.questions, + "application_deadline": s.application_deadline.isoformat() if s.application_deadline else None, + "expected_start_date": s.expected_start_date.isoformat() if s.expected_start_date else None, + "expected_end_date": s.expected_end_date.isoformat() if s.expected_end_date else None, + "estimated_scale": s.estimated_scale, + "contact_email": s.contact_email, + "created_by": s.created_by, + "program_name": s.program_name, + } + + +def _serialize_response(r): + return { + "id": r.pk, + "solicitation_id": r.solicitation_id, + "llo_entity_id": r.llo_entity_id, + "llo_entity_name": r.llo_entity_name, + "responses": r.responses, + "status": r.status, + "submitted_by_name": r.submitted_by_name, + "submitted_by_email": r.submitted_by_email, + "submission_date": r.submission_date.isoformat() if r.submission_date else None, + } + + +def _serialize_review(r): + return { + "id": r.pk, + "response_id": r.response_id, + "score": r.score, + "recommendation": r.recommendation, + "notes": r.notes, + "tags": r.tags, + "reviewer_username": r.reviewer_username, + "review_date": r.review_date.isoformat() if r.review_date else None, + } + + +# ── Solicitations ───────────────────────────────────────────── + +@csrf_exempt +@require_http_methods(["GET", "POST"]) +def api_solicitations_list(request): + da = _get_data_access(request) + if request.method == "GET": + status = request.GET.get("status") + sol_type = request.GET.get("type") + is_public = request.GET.get("is_public") + if is_public == "true": + results = da.get_public_solicitations(solicitation_type=sol_type) + else: + results = 
da.get_solicitations(status=status, solicitation_type=sol_type) + return JsonResponse({"solicitations": [_serialize_solicitation(s) for s in results]}) + + # POST — create + try: + body = json.loads(request.body) + result = da.create_solicitation(body) + return JsonResponse({"solicitation": _serialize_solicitation(result)}, status=201) + except Exception as e: + logger.exception("API create solicitation failed") + return JsonResponse({"error": str(e)}, status=400) + + +@csrf_exempt +@require_http_methods(["GET", "PUT"]) +def api_solicitation_detail(request, pk): + da = _get_data_access(request) + if request.method == "GET": + result = da.get_solicitation_by_id(pk) + if not result: + return JsonResponse({"error": "Not found"}, status=404) + return JsonResponse({"solicitation": _serialize_solicitation(result)}) + + # PUT — update + try: + body = json.loads(request.body) + result = da.update_solicitation(pk, body) + return JsonResponse({"solicitation": _serialize_solicitation(result)}) + except Exception as e: + logger.exception("API update solicitation %s failed", pk) + return JsonResponse({"error": str(e)}, status=400) + + +# ── Responses ───────────────────────────────────────────────── + +@csrf_exempt +@require_http_methods(["GET", "POST"]) +def api_responses_list(request): + da = _get_data_access(request) + if request.method == "GET": + solicitation_id = request.GET.get("solicitation_id") + if not solicitation_id: + return JsonResponse({"error": "solicitation_id required"}, status=400) + results = da.get_responses_for_solicitation(int(solicitation_id)) + return JsonResponse({"responses": [_serialize_response(r) for r in results]}) + + # POST — create + try: + body = json.loads(request.body) + result = da.create_response( + solicitation_id=body.pop("solicitation_id"), + llo_entity_id=body.pop("llo_entity_id"), + data=body, + ) + return JsonResponse({"response": _serialize_response(result)}, status=201) + except Exception as e: + logger.exception("API create 
response failed") + return JsonResponse({"error": str(e)}, status=400) + + +@csrf_exempt +@require_http_methods(["GET", "PUT"]) +def api_response_detail(request, pk): + da = _get_data_access(request) + if request.method == "GET": + result = da.get_response_by_id(pk) + if not result: + return JsonResponse({"error": "Not found"}, status=404) + return JsonResponse({"response": _serialize_response(result)}) + + try: + body = json.loads(request.body) + result = da.update_response(pk, body) + return JsonResponse({"response": _serialize_response(result)}) + except Exception as e: + logger.exception("API update response %s failed", pk) + return JsonResponse({"error": str(e)}, status=400) + + +# ── Reviews ─────────────────────────────────────────────────── + +@csrf_exempt +@require_http_methods(["POST"]) +def api_reviews_create(request): + da = _get_data_access(request) + try: + body = json.loads(request.body) + result = da.create_review( + response_id=body.pop("response_id"), + data=body, + ) + return JsonResponse({"review": _serialize_review(result)}, status=201) + except Exception as e: + logger.exception("API create review failed") + return JsonResponse({"error": str(e)}, status=400) + + +@csrf_exempt +@require_http_methods(["GET", "PUT"]) +def api_review_detail(request, pk): + da = _get_data_access(request) + if request.method == "GET": + result = da.get_review_by_id(pk) + if not result: + return JsonResponse({"error": "Not found"}, status=404) + return JsonResponse({"review": _serialize_review(result)}) + + try: + body = json.loads(request.body) + result = da.update_review(pk, body) + return JsonResponse({"review": _serialize_review(result)}) + except Exception as e: + logger.exception("API update review %s failed", pk) + return JsonResponse({"error": str(e)}, status=400) +``` + +**Step 4: Add API URLs** + +Add to `urls.py` urlpatterns: +```python +from . 
import api_views
+
+    # JSON API
+    path("api/solicitations/", api_views.api_solicitations_list, name="api_solicitations_list"),
+    path("api/solicitations/<int:pk>/", api_views.api_solicitation_detail, name="api_solicitation_detail"),
+    path("api/responses/", api_views.api_responses_list, name="api_responses_list"),
+    path("api/responses/<int:pk>/", api_views.api_response_detail, name="api_response_detail"),
+    path("api/reviews/", api_views.api_reviews_create, name="api_reviews_create"),
+    path("api/reviews/<int:pk>/", api_views.api_review_detail, name="api_review_detail"),
+```
+
+**Step 5: Run tests**
+
+Run: `pytest commcare_connect/solicitations_new/tests/test_api_views.py -v`
+Expected: All PASS
+
+**Step 6: Commit**
+
+```bash
+git add commcare_connect/solicitations_new/api_views.py commcare_connect/solicitations_new/urls.py commcare_connect/solicitations_new/tests/test_api_views.py
+git commit -m "feat(solicitations_new): add JSON API views for solicitations, responses, reviews"
+```
+
+---
+
+## Task 10: MCP Tools
+
+**Files:**
+- Create: `commcare_connect/solicitations_new/mcp_tools.py`
+
+**Step 1: Implement MCP tools**
+
+```python
+# commcare_connect/solicitations_new/mcp_tools.py
+"""MCP tool definitions for solicitations_new.
+
+These functions call data_access directly and are registered
+with the MCP server for AI agent access.
+""" + +from commcare_connect.solicitations_new.data_access import SolicitationsNewDataAccess + + +def _serialize_solicitation(s): + return { + "id": s.pk, + "title": s.title, + "description": s.description, + "solicitation_type": s.solicitation_type, + "status": s.status, + "is_public": s.is_public, + "application_deadline": s.application_deadline.isoformat() if s.application_deadline else None, + "estimated_scale": s.estimated_scale, + "program_name": s.program_name, + } + + +def _serialize_response(r): + return { + "id": r.pk, + "solicitation_id": r.solicitation_id, + "llo_entity_id": r.llo_entity_id, + "llo_entity_name": r.llo_entity_name, + "status": r.status, + "submitted_by_name": r.submitted_by_name, + } + + +def _serialize_review(r): + return { + "id": r.pk, + "response_id": r.response_id, + "score": r.score, + "recommendation": r.recommendation, + "reviewer_username": r.reviewer_username, + } + + +def list_solicitations(access_token, program_id=None, status=None, + solicitation_type=None, is_public=None): + """List solicitations, optionally filtered.""" + da = SolicitationsNewDataAccess(program_id=program_id, access_token=access_token) + if is_public: + results = da.get_public_solicitations(solicitation_type=solicitation_type) + else: + results = da.get_solicitations(status=status, solicitation_type=solicitation_type) + return [_serialize_solicitation(s) for s in results] + + +def get_solicitation(access_token, solicitation_id): + """Get a single solicitation by ID.""" + da = SolicitationsNewDataAccess(access_token=access_token) + result = da.get_solicitation_by_id(solicitation_id) + return _serialize_solicitation(result) if result else None + + +def create_solicitation(access_token, program_id, data): + """Create a new solicitation.""" + da = SolicitationsNewDataAccess(program_id=program_id, access_token=access_token) + result = da.create_solicitation(data) + return _serialize_solicitation(result) + + +def update_solicitation(access_token, 
solicitation_id, data): + """Update an existing solicitation.""" + da = SolicitationsNewDataAccess(access_token=access_token) + result = da.update_solicitation(solicitation_id, data) + return _serialize_solicitation(result) + + +def list_responses(access_token, solicitation_id, status=None): + """List responses for a solicitation.""" + da = SolicitationsNewDataAccess(access_token=access_token) + results = da.get_responses_for_solicitation(solicitation_id) + if status: + results = [r for r in results if r.status == status] + return [_serialize_response(r) for r in results] + + +def get_response(access_token, response_id): + """Get a single response by ID.""" + da = SolicitationsNewDataAccess(access_token=access_token) + result = da.get_response_by_id(response_id) + return _serialize_response(result) if result else None + + +def create_response(access_token, solicitation_id, llo_entity_id, data): + """Create a new response to a solicitation.""" + da = SolicitationsNewDataAccess(access_token=access_token) + result = da.create_response(solicitation_id, llo_entity_id, data) + return _serialize_response(result) + + +def create_review(access_token, response_id, data): + """Create a review for a response.""" + da = SolicitationsNewDataAccess(access_token=access_token) + result = da.create_review(response_id, data) + return _serialize_review(result) + + +def update_review(access_token, review_id, data): + """Update an existing review.""" + da = SolicitationsNewDataAccess(access_token=access_token) + result = da.update_review(review_id, data) + return _serialize_review(result) +``` + +**Step 2: Commit** + +```bash +git add commcare_connect/solicitations_new/mcp_tools.py +git commit -m "feat(solicitations_new): add MCP tool definitions for AI agent access" +``` + +--- + +## Task 11: Final URL Assembly & Integration Test + +**Files:** +- Modify: `commcare_connect/solicitations_new/urls.py` (ensure all routes assembled) +- Create: 
`commcare_connect/solicitations_new/tests/test_urls.py`
+
+**Step 1: Write the complete urls.py**
+
+```python
+# commcare_connect/solicitations_new/urls.py
+from django.urls import path
+
+from . import api_views, views
+
+app_name = "solicitations_new"
+
+urlpatterns = [
+    # Public (no login required)
+    path("", views.PublicSolicitationListView.as_view(), name="public_list"),
+    path("<int:pk>/", views.PublicSolicitationDetailView.as_view(), name="public_detail"),
+    # Manager (login required)
+    path("manage/", views.ManageSolicitationsView.as_view(), name="manage_list"),
+    path("create/", views.SolicitationCreateView.as_view(), name="create"),
+    path("<int:pk>/edit/", views.SolicitationEditView.as_view(), name="edit"),
+    path("<int:pk>/responses/", views.ResponsesListView.as_view(), name="responses_list"),
+    # Response (login required)
+    path("<int:pk>/respond/", views.RespondView.as_view(), name="respond"),
+    path("response/<int:pk>/", views.ResponseDetailView.as_view(), name="response_detail"),
+    # Review (manager required)
+    path("response/<int:pk>/review/", views.ReviewView.as_view(), name="review"),
+    # JSON API
+    path("api/solicitations/", api_views.api_solicitations_list, name="api_solicitations_list"),
+    path("api/solicitations/<int:pk>/", api_views.api_solicitation_detail, name="api_solicitation_detail"),
+    path("api/responses/", api_views.api_responses_list, name="api_responses_list"),
+    path("api/responses/<int:pk>/", api_views.api_response_detail, name="api_response_detail"),
+    path("api/reviews/", api_views.api_reviews_create, name="api_reviews_create"),
+    path("api/reviews/<int:pk>/", api_views.api_review_detail, name="api_review_detail"),
+]
+```
+
+**Step 2: Write URL resolution tests**
+
+```python
+# commcare_connect/solicitations_new/tests/test_urls.py
+import pytest
+from django.urls import resolve, reverse
+
+
+class TestURLResolution:
+    def test_public_list(self):
+        url = reverse("solicitations_new:public_list")
+        assert url == "/solicitations_new/"
+
+    def test_public_detail(self):
+        url = 
reverse("solicitations_new:public_detail", kwargs={"pk": 1}) + assert url == "/solicitations_new/1/" + + def test_manage_list(self): + url = reverse("solicitations_new:manage_list") + assert url == "/solicitations_new/manage/" + + def test_create(self): + url = reverse("solicitations_new:create") + assert url == "/solicitations_new/create/" + + def test_edit(self): + url = reverse("solicitations_new:edit", kwargs={"pk": 1}) + assert url == "/solicitations_new/1/edit/" + + def test_responses_list(self): + url = reverse("solicitations_new:responses_list", kwargs={"pk": 1}) + assert url == "/solicitations_new/1/responses/" + + def test_respond(self): + url = reverse("solicitations_new:respond", kwargs={"pk": 1}) + assert url == "/solicitations_new/1/respond/" + + def test_response_detail(self): + url = reverse("solicitations_new:response_detail", kwargs={"pk": 1}) + assert url == "/solicitations_new/response/1/" + + def test_review(self): + url = reverse("solicitations_new:review", kwargs={"pk": 1}) + assert url == "/solicitations_new/response/1/review/" + + def test_api_solicitations_list(self): + url = reverse("solicitations_new:api_solicitations_list") + assert url == "/solicitations_new/api/solicitations/" + + def test_api_solicitation_detail(self): + url = reverse("solicitations_new:api_solicitation_detail", kwargs={"pk": 1}) + assert url == "/solicitations_new/api/solicitations/1/" + + def test_api_responses_list(self): + url = reverse("solicitations_new:api_responses_list") + assert url == "/solicitations_new/api/responses/" + + def test_api_reviews_create(self): + url = reverse("solicitations_new:api_reviews_create") + assert url == "/solicitations_new/api/reviews/" +``` + +**Step 3: Run all tests** + +Run: `pytest commcare_connect/solicitations_new/ -v` +Expected: All PASS + +**Step 4: Commit** + +```bash +git add commcare_connect/solicitations_new/ +git commit -m "feat(solicitations_new): finalize URL routing and add URL resolution tests" +``` + +--- + +## 
Summary + +| Task | Description | Files | +|------|-------------|-------| +| 1 | App scaffolding & registration | apps.py, settings, urls, middleware | +| 2 | Proxy models | models.py + tests | +| 3 | Data access layer | data_access.py + tests | +| 4 | Forms | forms.py + tests | +| 5 | Public views & templates | views.py, 2 templates | +| 6 | Manager views & templates | views.py, 3 templates | +| 7 | Response views & templates | views.py, 2 templates | +| 8 | Review view & template | views.py, 1 template | +| 9 | JSON API views | api_views.py + tests | +| 10 | MCP tools | mcp_tools.py | +| 11 | Final URL assembly & integration test | urls.py + tests | diff --git a/docs/plans/2026-03-06-workflow-e2e-testing.md b/docs/plans/2026-03-06-workflow-e2e-testing.md new file mode 100644 index 000000000..736046b43 --- /dev/null +++ b/docs/plans/2026-03-06-workflow-e2e-testing.md @@ -0,0 +1,486 @@ +# Workflow Template E2E Testing — Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** E2E test workflow templates using Playwright against local `runserver` with real OAuth credentials and real production data, starting with `audit_with_ai_review`. + +**Architecture:** Playwright headless browser hits a local Django dev server (port 8001). Session auth is injected via a DEBUG-only view that reuses `TokenManager` to set up `labs_oauth` in the Django session. Celery runs in eager mode so tasks execute synchronously. Tests are marked `@pytest.mark.e2e` and excluded from normal test runs. 
+ +**Tech Stack:** pytest, pytest-playwright, Playwright (chromium), Django runserver subprocess + +--- + +### Task 1: Install dependencies + +**Files:** +- Modify: `requirements/local.txt` + +**Step 1: Add pytest-playwright to local requirements** + +Add to `requirements/local.txt`: +``` +pytest-playwright +``` + +**Step 2: Install and set up Playwright** + +Run: +```bash +pip install pytest-playwright +playwright install chromium +``` + +**Step 3: Register the e2e marker in pytest config** + +Add to `pyproject.toml` under `[tool.pytest.ini_options]`: +```toml +markers = [ + "e2e: End-to-end browser tests (require real OAuth token and runserver)", +] +``` + +**Step 4: Commit** + +```bash +git add requirements/local.txt pyproject.toml +git commit -m "chore: add pytest-playwright for E2E workflow testing" +``` + +--- + +### Task 2: Create the test-auth session injection view + +**Files:** +- Create: `commcare_connect/labs/views_test_auth.py` +- Modify: `commcare_connect/labs/urls.py` + +**Step 1: Write the test-auth view** + +Create `commcare_connect/labs/views_test_auth.py`. This view is gated behind `DEBUG=True` and reuses the exact session setup pattern from `commcare_connect/labs/management/commands/base_labs_url_test.py:50-103`. + +```python +""" +DEBUG-only view to inject a real OAuth session for Playwright E2E tests. + +Reads the CLI token from TokenManager, introspects it against production, +fetches org data, and writes labs_oauth into the Django session — exactly +like BaseLabsURLTest does for the Django test client. 
+""" + +import logging +from datetime import datetime + +from django.conf import settings +from django.http import JsonResponse +from django.utils import timezone +from django.views.decorators.http import require_GET + +from commcare_connect.labs.integrations.connect.cli import TokenManager +from commcare_connect.labs.integrations.connect.oauth import ( + fetch_user_organization_data, + introspect_token, +) + +logger = logging.getLogger(__name__) + + +@require_GET +def test_auth_view(request): + """Inject a real OAuth session for E2E testing. DEBUG only.""" + if not settings.DEBUG: + return JsonResponse({"error": "Only available in DEBUG mode"}, status=403) + + token_manager = TokenManager() + token_data = token_manager.load_token() + + if not token_data: + return JsonResponse({"error": "No CLI token found. Run: python manage.py get_cli_token"}, status=401) + + if token_manager.is_expired(): + return JsonResponse({"error": "CLI token expired. Run: python manage.py get_cli_token"}, status=401) + + access_token = token_data["access_token"] + + # Introspect token to get user profile + profile_data = introspect_token( + access_token=access_token, + client_id=settings.CONNECT_OAUTH_CLIENT_ID, + client_secret=settings.CONNECT_OAUTH_CLIENT_SECRET, + production_url=settings.CONNECT_PRODUCTION_URL, + ) + if not profile_data: + return JsonResponse({"error": "Token introspection failed"}, status=401) + + # Fetch org data + org_data = fetch_user_organization_data(access_token) + if not org_data: + return JsonResponse({"error": "Failed to fetch organization data"}, status=500) + + # Convert expires_at from ISO string to timestamp + if "expires_at" in token_data and isinstance(token_data["expires_at"], str): + expires_at = datetime.fromisoformat(token_data["expires_at"]).timestamp() + else: + expires_in = token_data.get("expires_in", 1209600) + expires_at = (timezone.now() + timezone.timedelta(seconds=expires_in)).timestamp() + + # Write session — same structure as the OAuth 
callback + request.session["labs_oauth"] = { + "access_token": access_token, + "refresh_token": token_data.get("refresh_token", ""), + "expires_at": expires_at, + "user_profile": { + "id": profile_data.get("id"), + "username": profile_data.get("username"), + "email": profile_data.get("email"), + "first_name": profile_data.get("first_name", ""), + "last_name": profile_data.get("last_name", ""), + }, + "organization_data": org_data, + } + + return JsonResponse({ + "success": True, + "username": profile_data.get("username"), + }) +``` + +**Step 2: Register the URL** + +In `commcare_connect/labs/urls.py`, add after the existing login/callback paths: + +```python +from commcare_connect.labs import views_test_auth + +# ... in urlpatterns: +path("test-auth/", views_test_auth.test_auth_view, name="test_auth"), +``` + +**Step 3: Verify manually** + +Run: +```bash +python manage.py runserver +``` +Then visit `http://localhost:8000/labs/test-auth/` — should return JSON with `{"success": true, "username": "jonathan"}`. + +**Step 4: Commit** + +```bash +git add commcare_connect/labs/views_test_auth.py commcare_connect/labs/urls.py +git commit -m "feat: add DEBUG-only test-auth view for Playwright session injection" +``` + +--- + +### Task 3: Create the E2E test infrastructure (conftest.py) + +**Files:** +- Create: `commcare_connect/workflow/tests/e2e/__init__.py` +- Create: `commcare_connect/workflow/tests/e2e/conftest.py` + +**Step 1: Create the conftest with all fixtures** + +Create `commcare_connect/workflow/tests/e2e/__init__.py` (empty). + +Create `commcare_connect/workflow/tests/e2e/conftest.py`: + +```python +""" +E2E test infrastructure for workflow templates. 
+ +Fixtures: +- live_server_url: starts runserver on port 8001, yields base URL +- browser/page: Playwright chromium browser (from pytest-playwright) +- authenticated_page: page with valid OAuth session injected +- opportunity_id: configurable via --opportunity-id flag + +Usage: + pytest commcare_connect/workflow/tests/e2e/ -m e2e --opportunity-id=874 +""" + +import socket +import subprocess +import sys +import time + +import pytest + +E2E_PORT = 8001 +E2E_HOST = "127.0.0.1" + + +def pytest_addoption(parser): + parser.addoption( + "--opportunity-id", + action="store", + default="874", + help="Opportunity ID to use for E2E tests", + ) + + +@pytest.fixture(scope="session") +def opportunity_id(request): + return request.config.getoption("--opportunity-id") + + +@pytest.fixture(scope="session") +def live_server_url(): + """Start Django runserver as a subprocess on port 8001.""" + # Check port is free + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + result = sock.connect_ex((E2E_HOST, E2E_PORT)) + sock.close() + if result == 0: + # Port already in use — assume dev server is running, reuse it + yield f"http://{E2E_HOST}:{E2E_PORT}" + return + + proc = subprocess.Popen( + [sys.executable, "manage.py", "runserver", f"{E2E_HOST}:{E2E_PORT}", "--noreload"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + # Wait for server to be ready (up to 30s) + for _ in range(60): + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(1) + sock.connect((E2E_HOST, E2E_PORT)) + sock.close() + break + except OSError: + time.sleep(0.5) + else: + proc.kill() + raise RuntimeError(f"Django server failed to start on {E2E_HOST}:{E2E_PORT}") + + yield f"http://{E2E_HOST}:{E2E_PORT}" + + proc.terminate() + proc.wait(timeout=10) + + +@pytest.fixture(scope="session") +def authenticated_context(browser, live_server_url): + """Create a browser context with a valid OAuth session. 
+ + Navigates to /labs/test-auth/ to inject the CLI token into the + Django session, then preserves the session cookie for all pages + created from this context. + """ + context = browser.new_context() + page = context.new_page() + + response = page.goto(f"{live_server_url}/labs/test-auth/") + assert response.status == 200, f"test-auth failed: {page.content()}" + + body = response.json() + assert body.get("success"), f"test-auth returned: {body}" + + page.close() + yield context + context.close() + + +@pytest.fixture +def auth_page(authenticated_context): + """A fresh page with valid auth session.""" + page = authenticated_context.new_page() + yield page + page.close() +``` + +**Step 2: Commit** + +```bash +git add commcare_connect/workflow/tests/e2e/ +git commit -m "feat: add E2E test infrastructure with Playwright fixtures" +``` + +--- + +### Task 4: Write the audit_with_ai_review E2E test + +**Files:** +- Create: `commcare_connect/workflow/tests/e2e/test_audit_workflow.py` + +**Step 1: Write the E2E test** + +Create `commcare_connect/workflow/tests/e2e/test_audit_workflow.py`: + +```python +""" +E2E test for the audit_with_ai_review workflow template. + +Tests the full happy path: +1. Create workflow from template (via POST to create endpoint) +2. Navigate to the workflow run page +3. Verify the React UI renders (Babel transpiles the JSX) +4. Switch to last_n mode and set a small sample size +5. Trigger audit creation +6. Wait for completion +7. Verify sessions appear +8. 
Clean up (delete the run) + +Run: + pytest commcare_connect/workflow/tests/e2e/test_audit_workflow.py -m e2e -v --opportunity-id=874 +""" + +import pytest +from playwright.sync_api import expect + +pytestmark = pytest.mark.e2e + + +class TestAuditWithAIReviewWorkflow: + """E2E test for the audit_with_ai_review workflow template.""" + + def test_full_audit_workflow(self, auth_page, live_server_url, opportunity_id): + """Test creating and running an audit workflow end-to-end.""" + page = auth_page + page.set_default_timeout(120_000) # 120s — eager Celery can be slow + + # --- Step 1: Create a workflow from the audit template --- + # POST to the create endpoint (same as clicking the template button) + page.goto(f"{live_server_url}/labs/workflow/?opportunity_id={opportunity_id}") + page.wait_for_load_state("networkidle") + + # Find and click the audit template create button + # The template list has forms with hidden input[name=template][value=audit_with_ai_review] + audit_form = page.locator("form:has(input[value='audit_with_ai_review'])") + if audit_form.count() > 0: + audit_form.locator("button[type='submit']").click() + page.wait_for_load_state("networkidle") + else: + # Template might already exist — navigate to workflow list and find it + pytest.skip("audit_with_ai_review template form not found on list page") + + # After creation, we're redirected back to /labs/workflow/ list + # Find the newly created workflow and click into it + # Look for a link containing "Weekly Audit with AI Review" + workflow_link = page.locator("a:has-text('Weekly Audit with AI Review')").first + expect(workflow_link).to_be_visible() + workflow_link.click() + page.wait_for_load_state("networkidle") + + # We should now be on the definition detail page — click "New Run" + new_run_link = page.locator("a:has-text('New Run'), a:has-text('Run'), button:has-text('New Run')").first + if new_run_link.is_visible(): + new_run_link.click() + page.wait_for_load_state("networkidle") + + # --- Step 2: 
Verify the React WorkflowUI renders --- + # Wait for Babel to transpile and the component to mount + # The audit UI has a mode selector (date_range vs last_n) + mode_selector = page.locator("text=Last N Per Opportunity, text=Last N visits, input[type='radio']").first + page.wait_for_selector("[data-testid='workflow-ui'], .workflow-container, text=Audit Mode", timeout=30_000) + + # --- Step 3: Configure for a small sample --- + # Switch to last_n mode for a fast, predictable test + last_n_radio = page.locator("label:has-text('Last N'), input[value='last_n_per_opp']").first + if last_n_radio.is_visible(): + last_n_radio.click() + + # Set a small count (e.g., 3) to keep the test fast + count_input = page.locator("input[type='number']").first + if count_input.is_visible(): + count_input.fill("3") + + # --- Step 4: Trigger audit creation --- + create_button = page.locator("button:has-text('Create Audit'), button:has-text('Run Audit')").first + expect(create_button).to_be_visible() + expect(create_button).to_be_enabled() + create_button.click() + + # --- Step 5: Wait for completion --- + # Progress UI should appear + page.wait_for_selector("text=Processing, text=Fetching, text=Extracting, text=Creating", timeout=15_000) + + # Wait for completion (generous timeout for real API calls) + page.wait_for_selector( + "text=Complete, text=Completed, text=sessions created", + timeout=120_000, + ) + + # --- Step 6: Verify results --- + # Sessions should appear in the linked sessions list + # The UI shows session cards/rows after completion + page.wait_for_timeout(2_000) # Brief wait for session fetch + + # Check that at least one session is visible + sessions_area = page.locator("text=Audit Session, text=sessions, text=FLW") + expect(sessions_area.first).to_be_visible(timeout=10_000) + + # --- Step 7: Cleanup --- + # Delete the workflow run to avoid polluting production labs records + # Get the run_id from the URL + current_url = page.url + if "run_id=" in current_url: + import re 
+ run_id_match = re.search(r"run_id=(\d+)", current_url) + if run_id_match: + run_id = run_id_match.group(1) + # Delete via API + csrf_token = page.locator("input[name='csrfmiddlewaretoken']").first.get_attribute("value") or "" + page.request.post( + f"{live_server_url}/labs/workflow/api/run/{run_id}/delete/?opportunity_id={opportunity_id}", + headers={"X-CSRFToken": csrf_token}, + ) +``` + +**Step 2: Run the test to verify it works** + +Run: +```bash +pytest commcare_connect/workflow/tests/e2e/test_audit_workflow.py -m e2e -v --opportunity-id=874 +``` + +Expected: Test should pass end-to-end (may take 60-120s due to real API calls). + +**Step 3: Debug and adjust selectors as needed** + +The CSS selectors above are best-guesses based on the render code. After the first run, adjust selectors to match actual DOM elements. Use `page.screenshot(path="debug.png")` at failure points to inspect the UI state. + +**Step 4: Commit** + +```bash +git add commcare_connect/workflow/tests/e2e/test_audit_workflow.py +git commit -m "feat: add E2E test for audit_with_ai_review workflow template" +``` + +--- + +### Task 5: Verify everything works together + +**Step 1: Ensure CLI token is valid** + +Run: +```bash +python manage.py get_cli_token +``` + +**Step 2: Run the full E2E suite** + +Run: +```bash +pytest commcare_connect/workflow/tests/e2e/ -m e2e -v --opportunity-id=874 +``` + +Expected: Server starts on 8001, auth injects, audit workflow runs end-to-end, cleanup succeeds. + +**Step 3: Run normal pytest to verify E2E tests are excluded** + +Run: +```bash +pytest commcare_connect/workflow/tests/ -v +``` + +Expected: Only `test_mbw_v1_v2_parity.py` tests run. E2E tests are skipped (no `e2e` marker selected). 
+ +**Step 4: Final commit** + +```bash +git add -A +git commit -m "docs: finalize E2E testing plan and infrastructure" +``` diff --git a/docs/plans/2026-03-07-commcare-mcp-server-design.md b/docs/plans/2026-03-07-commcare-mcp-server-design.md new file mode 100644 index 000000000..e4f873592 --- /dev/null +++ b/docs/plans/2026-03-07-commcare-mcp-server-design.md @@ -0,0 +1,131 @@ +# CommCare HQ MCP Server — Design + +**Date:** 2026-03-07 +**Status:** Approved + +## Overview + +Always-on MCP server for Claude Code sessions that provides CommCare application structure context. When building or debugging workflow pipeline schemas, Claude can query form question paths, case types, and module structure instead of guessing. + +**Primary use case:** Fix the KMC template's empty fields by understanding the actual CommCare form structure, and prevent the same issue in future workflow templates. + +## Architecture + +``` +Claude Code ──stdio──> commcare_mcp server ──HTTP──> CommCare HQ API + │ (/api/v0.5/application/) + ├─ reads env vars for auth + ├─ caches app definitions in memory + └─ serves 3 static reference resources +``` + +**Location:** `tools/commcare_mcp/` in the Connect repo +**Stack:** Python, `mcp` SDK (FastMCP), `httpx` for HTTP, stdio transport +**Auth:** CommCare HQ API key via env vars (`COMMCARE_HQ_API_KEY`, `COMMCARE_HQ_USER_EMAIL`, `COMMCARE_HQ_DOMAIN`) + +## MCP Tools (4) + +### `list_apps` +- **Input:** `domain` (optional, defaults to env var) +- **Output:** List of apps with name, ID, module count, form count +- **Use:** "What apps exist on this domain?" + +### `get_app_structure` +- **Input:** `domain`, `app_id` +- **Output:** Tree: modules → forms (with xmlns, case_type) → case types +- **Use:** "Show me the KMC app structure" + +### `get_form_questions` +- **Input:** `domain`, `app_id`, `xmlns` +- **Output:** Question tree with IDs, types, labels, constraints, groups/repeats +- **Use:** "What fields does the KMC visit form have?" 
+ +### `get_form_json_paths` +- **Input:** `domain`, `app_id`, `xmlns` +- **Output:** Flat list mapping each question to its form submission JSON path +- **Use:** "What path should I use in PIPELINE_SCHEMAS for the weight field?" +- **Key logic:** Maps question paths to JSON paths: + - `/data/weight` → `form.weight` + - `/data/child_info/birth_weight` → `form.child_info.birth_weight` + - `/data/case/update/child_weight` → `form.case.update.child_weight` + - Groups create nested objects, repeats create arrays + +## MCP Resources (3) + +### `commcare://app-schema` +- **Source:** CommCare Forge's `compact-json-schema.md` +- **Content:** Question type taxonomy (20 types), case property mapping rules (`case_properties`, `case_preload`), group/repeat nesting, 24 reserved property names, validation rules +- **Why:** Provides the vocabulary for understanding CommCare app structure + +### `commcare://xml-reference` +- **Source:** CommCare Forge's `commcare-reference.md` +- **Content:** XForm/Suite/Case XML structure — bind types, case operations (create/update/close/index), session datums, detail definitions +- **Why:** Explains the upstream XML that produces the JSON structures we see in form submissions + +### `commcare://data-patterns` +- **Source:** New doc, distilled from Scout's loader code +- **Content:** How form submission JSON actually looks at query time: + - Form JSON structure (`form.` prefix, nested groups, repeat arrays) + - Case block nesting (`form.case.@case_id`, `form.group.case.@case_id`) + - Question path → JSON path mapping rules + - Common pitfalls (Python repr in form_json, reserved properties, `@` attributes) +- **Why:** The operational reality of working with CommCare data + +## Caching + +App definitions can be 5-10MB. Cache in memory after first fetch, keyed by `(domain, app_id)`. Invalidated on server restart. No persistent storage. 
+ +## Configuration + +```json +// .claude/mcp.json +{ + "mcpServers": { + "commcare-hq": { + "command": "python", + "args": ["tools/commcare_mcp/server.py"], + "env": { + "COMMCARE_HQ_DOMAIN": "your-domain", + "COMMCARE_HQ_API_KEY": "user@example.com:your-api-key", + "COMMCARE_HQ_URL": "https://www.commcarehq.org" + } + } + } +} +``` + +## What It Does NOT Do + +- No real data access (no form submissions, no case data, no PII) +- No writes to CommCare +- No materialization or storage +- No Connect API access (that's handled by the existing labs infrastructure) + +## File Structure + +``` +tools/commcare_mcp/ +├── server.py # MCP server entry point (FastMCP, tool definitions) +├── hq_client.py # CommCare HQ API client (httpx, auth, caching) +├── extractors.py # App structure extraction, question path mapping +├── resources/ +│ ├── app_schema.md # CommCare Forge compact-json-schema (bundled) +│ ├── xml_reference.md # CommCare Forge XForm/Suite/Case reference (bundled) +│ └── data_patterns.md # Scout-derived form JSON patterns (new) +└── requirements.txt # mcp, httpx +``` + +## Key References + +- **Scout** (`../scout/mcp_server/loaders/commcare_metadata.py`): App structure extraction from HQ API — battle-tested logic for walking modules/forms/case_types +- **CommCare Forge** (`kcowger/commcare-forge` PR #3): Reference docs for CommCare concepts, question types, case property patterns +- **Existing HQ API code** (`commcare_connect/utils/commcarehq_api.py`): Already has `_get_commcare_app_json()` calling `/api/v0.5/application/` +- **Existing HQ OAuth** (`commcare_connect/labs/integrations/commcare/`): Full OAuth flow if we want to upgrade from API key auth later + +## Design Decisions + +1. **API key auth (not OAuth)** — Simpler for an always-on MCP server. No browser flow needed. Can upgrade to OAuth later if needed. +2. **Stdio transport** — Standard for Claude Code MCP servers. No HTTP port management. +3. 
**In-repo location** — Lives in `tools/` rather than a separate package. Simple, discoverable, versioned with the project. +4. **Static resource bundling** — Reference docs are copied into the repo, not fetched at runtime. They're stable documents that change rarely. +5. **Python** — Same stack as Connect. Can reuse patterns from existing HQ API code. `mcp` Python SDK is sufficient for our needs. diff --git a/docs/plans/2026-03-07-commcare-mcp-server.md b/docs/plans/2026-03-07-commcare-mcp-server.md new file mode 100644 index 000000000..86f64b5db --- /dev/null +++ b/docs/plans/2026-03-07-commcare-mcp-server.md @@ -0,0 +1,979 @@ +# CommCare HQ MCP Server — Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Build an always-on MCP server that gives Claude Code access to CommCare application structure (form questions, case types, JSON paths) for building and debugging workflow pipeline schemas. + +**Architecture:** Standalone Python MCP server using FastMCP (stdio transport). Queries CommCare HQ `/api/v0.5/application/` API for app definitions. Exposes 4 tools + 3 static reference resources. Caches app definitions in memory. Auth via API key in env vars. + +**Tech Stack:** Python 3.13, `mcp` SDK (FastMCP), `httpx` for HTTP, bundled markdown resources from CommCare Forge + Scout. 
+ +--- + +### Task 1: Create directory structure and requirements + +**Files:** +- Create: `tools/commcare_mcp/__init__.py` +- Create: `tools/commcare_mcp/requirements.txt` +- Create: `tools/commcare_mcp/resources/` (directory) + +**Step 1: Create the directory and requirements file** + +``` +tools/commcare_mcp/ +├── __init__.py (empty) +├── requirements.txt +└── resources/ +``` + +`tools/commcare_mcp/requirements.txt`: +``` +mcp>=1.20.0 +httpx>=0.27.0 +``` + +**Step 2: Verify structure** + +Run: `ls tools/commcare_mcp/` +Expected: `__init__.py requirements.txt resources/` + +**Step 3: Commit** + +```bash +git add tools/commcare_mcp/ +git commit -m "feat: scaffold commcare MCP server directory" +``` + +--- + +### Task 2: Bundle reference resources + +**Files:** +- Create: `tools/commcare_mcp/resources/app_schema.md` +- Create: `tools/commcare_mcp/resources/xml_reference.md` +- Create: `tools/commcare_mcp/resources/data_patterns.md` + +**Step 1: Copy CommCare Forge's compact-json-schema.md** + +Fetch from GitHub and save as `tools/commcare_mcp/resources/app_schema.md`: + +```bash +gh api repos/kcowger/commcare-forge/contents/docs/compact-json-schema.md --jq '.content' | base64 -d > tools/commcare_mcp/resources/app_schema.md +``` + +**Step 2: Copy CommCare Forge's commcare-reference.md** + +```bash +gh api repos/kcowger/commcare-forge/contents/docs/commcare-reference.md --jq '.content' | base64 -d > tools/commcare_mcp/resources/xml_reference.md +``` + +**Step 3: Write the data_patterns.md resource** + +This is a new document distilled from Scout's loader code. Create `tools/commcare_mcp/resources/data_patterns.md`: + +```markdown +# CommCare Form Data Patterns + +How CommCare form submissions look as JSON when retrieved via API — the operational +reality needed for mapping pipeline schema field paths. 
+ +## Form Submission JSON Structure + +When a CommCare form is submitted, the API returns: + +```json +{ + "id": "form-uuid", + "form": { + "@xmlns": "http://openrosa.org/formdesigner/FORM-UUID", + "@name": "Visit Form", + "question_id": "value", + "group_name": { + "nested_question": "value" + }, + "repeat_group": [ + {"item_question": "value1"}, + {"item_question": "value2"} + ], + "case": { + "@case_id": "case-uuid", + "update": { + "property_name": "value" + } + }, + "meta": { + "userID": "user-uuid", + "timeStart": "2026-01-15T10:30:00Z", + "timeEnd": "2026-01-15T10:35:00Z" + } + }, + "received_on": "2026-01-15T10:35:01Z", + "app_id": "app-uuid" +} +``` + +## Question Path → JSON Path Mapping Rules + +CommCare question IDs map to form submission JSON paths as follows: + +| Question path in app definition | JSON path in form submission | +|--------------------------------|------------------------------| +| `/data/weight` | `form.weight` | +| `/data/child_info/birth_weight` (inside group) | `form.child_info.birth_weight` | +| `/data/visits/visit_date` (inside repeat) | `form.visits[].visit_date` | +| `/data/case/update/last_weight` (case property) | `form.case.update.last_weight` | +| `/data/case/@case_id` (case reference) | `form.case.@case_id` | +| `/data/meta/userID` (form metadata) | `form.meta.userID` | + +**Rules:** +1. Strip the `/data/` prefix and replace with `form.` +2. Groups create nested objects: `/data/group/question` → `form.group.question` +3. Repeat groups create arrays: `/data/repeat/question` → `form.repeat[].question` +4. Case blocks appear at `form.case` (or deeper: `form.group.case`) +5. The `@` prefix on attributes is preserved: `@case_id`, `@xmlns`, `@name` +6. The `meta` block is always at `form.meta` with `userID`, `timeStart`, `timeEnd`, etc. + +## Case Block Nesting + +Case blocks can appear at ANY depth in the form JSON. 
They are identified by the +presence of `@case_id` in a dict: + +```json +// Top-level case +"form": { "case": { "@case_id": "abc", "update": { "weight": "2500" } } } + +// Nested in a group +"form": { "child_group": { "case": { "@case_id": "def", "create": { ... } } } } + +// Inside a repeat group (one case per repeat entry) +"form": { "household_members": [ + { "case": { "@case_id": "ghi", "update": { ... } } }, + { "case": { "@case_id": "jkl", "update": { ... } } } +] } +``` + +## Common Field Patterns + +### Weight/measurements +``` +form.weight → current weight (usually grams as string) +form.birth_weight → birth weight +form.child_weight_visit → weight at visit (alternative naming) +``` + +### GPS/Location +``` +form.gps → "lat lon altitude accuracy" (space-separated string) +``` + +### Dates +``` +form.visit_date → "2026-01-15" (date string) +form.meta.timeStart → "2026-01-15T10:30:00Z" (form open time) +form.meta.timeEnd → "2026-01-15T10:35:00Z" (form submit time) +``` + +### Case identification +``` +form.case.@case_id → the case being updated +form.subcase_0.case.@case_id → child case (when creating sub-cases) +``` + +### Beneficiary/entity linking +``` +form.case.@case_id → the beneficiary case ID (most common) +form.case.index.parent → parent case reference +``` + +## Common Pitfalls + +1. **Field names are case-sensitive** — `form.Weight` ≠ `form.weight` +2. **Repeat groups become arrays** — even if there's only one entry +3. **Empty fields may be omitted** — check for key existence, don't assume all fields present +4. **Select multiple values are space-separated** — `"option1 option2 option3"` +5. **Numbers are strings** — weights, ages, etc. come as `"2500"` not `2500` +6. **GPS is a space-separated string** — `"0.3456 32.1234 1200 10"` (lat, lon, alt, accuracy) +7. **form_json from Connect visits** — may be Python repr format (`{'key': 'value'}` with single quotes) instead of valid JSON. Use `ast.literal_eval` as fallback. +8. 
**@-prefixed attributes** — `@case_id`, `@xmlns`, `@name` are XML attribute artifacts preserved in JSON +``` + +**Step 4: Verify all three resources exist** + +Run: `ls tools/commcare_mcp/resources/` +Expected: `app_schema.md data_patterns.md xml_reference.md` + +**Step 5: Commit** + +```bash +git add tools/commcare_mcp/resources/ +git commit -m "feat: bundle CommCare reference resources for MCP server" +``` + +--- + +### Task 3: Implement HQ API client with caching + +**Files:** +- Create: `tools/commcare_mcp/hq_client.py` + +**Step 1: Write the HQ client** + +This is adapted from Scout's `commcare_base.py` + `commcare_metadata.py`. Uses `httpx` (async), API key auth, and in-memory caching. + +Create `tools/commcare_mcp/hq_client.py`: + +```python +"""CommCare HQ API client for fetching application definitions. + +Auth: API key via env vars (COMMCARE_HQ_API_KEY format: "user@example.com:apikey123") +Caching: In-memory, keyed by (domain, app_id). Invalidated on server restart. +""" + +from __future__ import annotations + +import logging +import os + +import httpx + +logger = logging.getLogger(__name__) + +HQ_URL = os.environ.get("COMMCARE_HQ_URL", "https://www.commcarehq.org") +HQ_API_KEY = os.environ.get("COMMCARE_HQ_API_KEY", "") # "user@email.com:apikey" +HQ_DOMAIN = os.environ.get("COMMCARE_HQ_DOMAIN", "") +HTTP_TIMEOUT = httpx.Timeout(connect=10, read=120, write=10, pool=10) + +# In-memory cache: (domain, app_id) -> app definition dict +_app_cache: dict[tuple[str, str], dict] = {} +# domain -> list of app summaries +_app_list_cache: dict[str, list[dict]] = {} + + +def _auth_header() -> dict[str, str]: + """Build Authorization header from env var.""" + if not HQ_API_KEY: + raise ValueError( + "COMMCARE_HQ_API_KEY not set. Format: 'user@example.com:your-api-key'" + ) + return {"Authorization": f"ApiKey {HQ_API_KEY}"} + + +async def list_apps(domain: str | None = None) -> list[dict]: + """Fetch all applications for a domain from CommCare HQ. 
+
+    Returns list of dicts with: id, name, version, is_released, module_count, form_count.
+    Results are cached in memory.
+    """
+    domain = domain or HQ_DOMAIN
+    if not domain:
+        raise ValueError("No domain specified. Set COMMCARE_HQ_DOMAIN or pass domain parameter.")
+
+    if domain in _app_list_cache:
+        return _app_list_cache[domain]
+
+    apps_raw = await _fetch_all_apps(domain)
+    summaries = []
+    for app in apps_raw:
+        modules = app.get("modules", [])
+        form_count = sum(len(m.get("forms", [])) for m in modules)
+        summaries.append({
+            "id": app.get("id", ""),
+            "name": app.get("name", ""),
+            "version": app.get("version", 0),
+            "is_released": app.get("is_released", False),
+            "module_count": len(modules),
+            "form_count": form_count,
+        })
+
+    _app_list_cache[domain] = summaries
+    return summaries
+
+
+async def get_app(domain: str | None, app_id: str) -> dict:
+    """Fetch a single application definition. Cached after first fetch."""
+    domain = domain or HQ_DOMAIN
+    if not domain:
+        raise ValueError("No domain specified.")
+
+    cache_key = (domain, app_id)
+    if cache_key in _app_cache:
+        return _app_cache[cache_key]
+
+    # Fetch all apps and find the one we want
+    apps = await _fetch_all_apps(domain)
+    for app in apps:
+        key = (domain, app.get("id", ""))
+        _app_cache[key] = app
+
+    if cache_key not in _app_cache:
+        raise ValueError(f"App {app_id} not found in domain {domain}")
+
+    return _app_cache[cache_key]
+
+
+async def _fetch_all_apps(domain: str) -> list[dict]:
+    """Fetch all application definitions from the HQ API with pagination."""
+    url = f"{HQ_URL}/a/{domain}/api/v0.5/application/"
+    params = {"limit": 100}
+    apps: list[dict] = []
+
+    async with httpx.AsyncClient(
+        headers=_auth_header(),
+        timeout=HTTP_TIMEOUT,
+    ) as client:
+        while url:
+            resp = await client.get(url, params=params)
+            if resp.status_code in (401, 403):
+                raise PermissionError(
+                    f"CommCare HQ auth failed for domain {domain}: HTTP {resp.status_code}. "
+                    "Check COMMCARE_HQ_API_KEY."
+ ) + resp.raise_for_status() + data = resp.json() + apps.extend(data.get("objects", [])) + url = data.get("next") + params = {} # next URL includes params + + logger.info("Fetched %d apps for domain %s", len(apps), domain) + return apps + + +def clear_cache(): + """Clear all cached app definitions.""" + _app_cache.clear() + _app_list_cache.clear() +``` + +**Step 2: Verify it imports cleanly** + +Run: `cd tools/commcare_mcp && python -c "import hq_client; print('OK')"` +Expected: `OK` + +**Step 3: Commit** + +```bash +git add tools/commcare_mcp/hq_client.py +git commit -m "feat: add CommCare HQ API client with caching" +``` + +--- + +### Task 4: Implement extractors (app structure + JSON path mapping) + +**Files:** +- Create: `tools/commcare_mcp/extractors.py` + +**Step 1: Write the extractors module** + +This is the core logic — extracts structured app info from raw HQ API responses and maps question paths to form JSON paths. Adapted from Scout's `_extract_case_types` and `_extract_form_definitions`. + +Create `tools/commcare_mcp/extractors.py`: + +```python +"""Extract structured app information from CommCare HQ API responses. + +Functions: +- extract_app_structure: module → form → case type tree +- extract_form_questions: question tree with types and labels +- extract_form_json_paths: flat mapping of question → JSON path (for pipeline schemas) +- extract_case_types: all case types across an app's modules +""" + +from __future__ import annotations + +from typing import Any + + +def extract_app_structure(app: dict) -> dict: + """Extract a clean app structure tree from a raw HQ app definition. 
+
+    Returns:
+        {
+          "app_id": str,
+          "app_name": str,
+          "modules": [
+            {
+              "name": str,
+              "case_type": str,
+              "forms": [
+                {"name": str, "xmlns": str, "question_count": int}
+              ]
+            }
+          ],
+          "case_types": [{"name": str, "module": str}]
+        }
+    """
+    modules = []
+    case_types_seen: set[str] = set()
+    case_types: list[dict] = []
+
+    for module in app.get("modules", []):
+        mod_name = _get_name(module)
+        ct = module.get("case_type", "")
+
+        forms = []
+        for form in module.get("forms", []):
+            forms.append({
+                "name": _get_name(form),
+                "xmlns": form.get("xmlns", ""),
+                "question_count": len(form.get("questions", [])),
+            })
+
+        modules.append({
+            "name": mod_name,
+            "case_type": ct,
+            "forms": forms,
+        })
+
+        if ct and ct not in case_types_seen:
+            case_types_seen.add(ct)
+            case_types.append({"name": ct, "module": mod_name})
+
+    return {
+        "app_id": app.get("id", ""),
+        "app_name": app.get("name", ""),
+        "modules": modules,
+        "case_types": case_types,
+    }
+
+
+def extract_form_questions(app: dict, xmlns: str) -> dict | None:
+    """Extract the question tree for a specific form identified by xmlns.
+
+    Returns:
+        {
+          "form_name": str,
+          "module_name": str,
+          "case_type": str,
+          "xmlns": str,
+          "questions": [
+            {
+              "id": str,            # e.g. "weight"
+              "type": str,          # e.g. "Int"
+              "label": str,         # e.g. "Weight (grams)"
+              "path": str,          # e.g. "/data/weight"
+              "required": bool,
+              "constraint": str | None,
+              "relevant": str | None,
+              "calculate": str | None,
+              "options": [{"value": str, "label": str}] | None,
+              "children": [...]
| None, # for groups/repeats + } + ] + } + """ + for module in app.get("modules", []): + for form in module.get("forms", []): + if form.get("xmlns") == xmlns: + questions = _process_questions(form.get("questions", [])) + return { + "form_name": _get_name(form), + "module_name": _get_name(module), + "case_type": module.get("case_type", ""), + "xmlns": xmlns, + "questions": questions, + } + return None + + +def extract_form_json_paths(app: dict, xmlns: str) -> dict | None: + """Extract a flat mapping of form questions to their JSON submission paths. + + This is the key tool for building PIPELINE_SCHEMAS — it tells you exactly + what path to use for each field. + + Returns: + { + "form_name": str, + "xmlns": str, + "case_type": str, + "paths": [ + { + "json_path": "form.weight", # use this in PIPELINE_SCHEMAS + "question_path": "/data/weight", # original XForm path + "type": "Int", # CommCare data type + "label": "Weight (grams)", # human-readable label + } + ] + } + """ + for module in app.get("modules", []): + for form in module.get("forms", []): + if form.get("xmlns") == xmlns: + paths = _build_json_paths(form.get("questions", [])) + return { + "form_name": _get_name(form), + "xmlns": xmlns, + "case_type": module.get("case_type", ""), + "paths": paths, + } + return None + + +def _process_questions(questions: list[dict]) -> list[dict]: + """Process raw HQ question list into a clean tree.""" + result = [] + for q in questions: + processed = { + "id": _question_id_from_path(q.get("value", "")), + "type": q.get("type", ""), + "label": _get_label(q), + "path": q.get("value", ""), + "required": q.get("required", False), + } + + # Optional fields — only include if present + if q.get("constraint"): + processed["constraint"] = q["constraint"] + if q.get("relevant"): + processed["relevant"] = q["relevant"] + if q.get("calculate"): + processed["calculate"] = q["calculate"] + + # Options for select questions + options = q.get("options") + if options: + processed["options"] = [ 
+ {"value": o.get("value", ""), "label": _get_label(o)} + for o in options + ] + + # Nested questions for groups/repeats + children = q.get("children") + if children: + processed["children"] = _process_questions(children) + + result.append(processed) + return result + + +def _build_json_paths( + questions: list[dict], prefix: str = "form" +) -> list[dict]: + """Build flat list of JSON paths from HQ question definitions. + + Maps each question's XForm path to its form submission JSON path. + Rules: + /data/weight → form.weight + /data/group/question → form.group.question + /data/repeat/question → form.repeat[].question + """ + paths: list[dict] = [] + + for q in questions: + q_path = q.get("value", "") + q_type = q.get("type", "") + label = _get_label(q) + + # Convert XForm path to JSON path + json_path = _xform_path_to_json_path(q_path, prefix) + + # Skip group/repeat containers themselves — only include leaf questions + if q_type in ("Group", "Repeat"): + # Recurse into children with updated prefix + children = q.get("children", []) + if children: + child_prefix = json_path + if q_type == "Repeat": + child_prefix = f"{json_path}[]" + paths.extend(_build_json_paths(children, prefix=child_prefix)) + continue + + if json_path: + paths.append({ + "json_path": json_path, + "question_path": q_path, + "type": q_type, + "label": label, + }) + + return paths + + +def _xform_path_to_json_path(xform_path: str, prefix: str = "form") -> str: + """Convert an XForm question path to a form submission JSON path. 
+ + /data/weight → form.weight + /data/group/question → form.group.question + """ + if not xform_path: + return "" + + # Strip /data/ prefix + parts = xform_path.strip("/").split("/") + if parts and parts[0] == "data": + parts = parts[1:] + + if not parts: + return "" + + return f"{prefix}.{'.'.join(parts)}" + + +def _question_id_from_path(path: str) -> str: + """Extract the question ID (last segment) from an XForm path.""" + if not path: + return "" + return path.rstrip("/").rsplit("/", 1)[-1] + + +def _get_name(obj: dict) -> str: + """Extract display name from HQ object (handles dict/string name field).""" + name = obj.get("name", "") + if isinstance(name, dict): + return name.get("en", next(iter(name.values()), "")) + return str(name) + + +def _get_label(obj: dict) -> str: + """Extract display label from a question object.""" + label = obj.get("label", "") + if isinstance(label, dict): + return label.get("en", next(iter(label.values()), "")) + return str(label) +``` + +**Step 2: Verify it imports cleanly** + +Run: `cd tools/commcare_mcp && python -c "import extractors; print('OK')"` +Expected: `OK` + +**Step 3: Commit** + +```bash +git add tools/commcare_mcp/extractors.py +git commit -m "feat: add app structure extractors and JSON path mapping" +``` + +--- + +### Task 5: Implement the MCP server + +**Files:** +- Create: `tools/commcare_mcp/server.py` + +**Step 1: Write the MCP server** + +This wires together the HQ client and extractors into MCP tools and resources. + +Create `tools/commcare_mcp/server.py`: + +```python +"""CommCare HQ MCP Server. + +Provides CommCare application structure context for Claude Code sessions. +Tools let you explore app modules, form questions, and JSON field paths +for building workflow pipeline schemas. 
+ +Usage (stdio, for Claude Code): + python tools/commcare_mcp/server.py + +Configuration via env vars: + COMMCARE_HQ_DOMAIN - Default CommCare domain + COMMCARE_HQ_API_KEY - API key as "user@email.com:apikey" + COMMCARE_HQ_URL - HQ base URL (default: https://www.commcarehq.org) +""" + +from __future__ import annotations + +import logging +from pathlib import Path + +from mcp.server.fastmcp import FastMCP + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +RESOURCES_DIR = Path(__file__).parent / "resources" + +mcp = FastMCP( + "commcare-hq", + instructions=( + "CommCare HQ application structure server. Use these tools to understand " + "CommCare app form structure, question types, and JSON field paths. " + "This is especially useful when building or debugging workflow pipeline " + "schemas (PIPELINE_SCHEMAS) that map form fields to data extraction paths." + ), +) + + +# --- Resources --- + + +@mcp.resource("commcare://app-schema") +def app_schema_resource() -> str: + """CommCare app structure reference — question types, case properties, validation rules.""" + return (RESOURCES_DIR / "app_schema.md").read_text(encoding="utf-8") + + +@mcp.resource("commcare://xml-reference") +def xml_reference_resource() -> str: + """CommCare XForm/Suite/Case XML structure reference.""" + return (RESOURCES_DIR / "xml_reference.md").read_text(encoding="utf-8") + + +@mcp.resource("commcare://data-patterns") +def data_patterns_resource() -> str: + """How CommCare form submission JSON is structured — path mapping rules and pitfalls.""" + return (RESOURCES_DIR / "data_patterns.md").read_text(encoding="utf-8") + + +# --- Tools --- + + +@mcp.tool() +async def list_apps(domain: str = "") -> dict: + """List all CommCare applications for a domain. + + Returns app names, IDs, module counts, and form counts. + Use this to find the app_id needed for other tools. 
+ + Args: + domain: CommCare domain name (optional, uses COMMCARE_HQ_DOMAIN env var if not set) + """ + from tools.commcare_mcp.hq_client import list_apps as _list_apps + + try: + apps = await _list_apps(domain or None) + return {"apps": apps, "count": len(apps)} + except Exception as e: + return {"error": str(e)} + + +@mcp.tool() +async def get_app_structure(app_id: str, domain: str = "") -> dict: + """Get the module/form/case-type structure of a CommCare application. + + Shows the full app tree: modules → forms (with xmlns) → case types. + Use this to understand how an app is organized before drilling into specific forms. + + Args: + app_id: The CommCare application ID (from list_apps) + domain: CommCare domain name (optional, uses env var default) + """ + from tools.commcare_mcp.extractors import extract_app_structure + from tools.commcare_mcp.hq_client import get_app + + try: + app = await get_app(domain or None, app_id) + return extract_app_structure(app) + except Exception as e: + return {"error": str(e)} + + +@mcp.tool() +async def get_form_questions(app_id: str, xmlns: str, domain: str = "") -> dict: + """Get the full question tree for a specific form. + + Shows all questions with their types, labels, constraints, skip logic, + and nesting (groups/repeats). Use this to understand what data a form collects. 
+ + Args: + app_id: The CommCare application ID + xmlns: The form's xmlns identifier (from get_app_structure) + domain: CommCare domain name (optional) + """ + from tools.commcare_mcp.extractors import extract_form_questions + from tools.commcare_mcp.hq_client import get_app + + try: + app = await get_app(domain or None, app_id) + result = extract_form_questions(app, xmlns) + if result is None: + return {"error": f"Form with xmlns '{xmlns}' not found in app {app_id}"} + return result + except Exception as e: + return {"error": str(e)} + + +@mcp.tool() +async def get_form_json_paths(app_id: str, xmlns: str, domain: str = "") -> dict: + """Map form questions to their JSON submission paths for pipeline schemas. + + THIS IS THE KEY TOOL for building PIPELINE_SCHEMAS. It shows exactly what + path each form question will have in submitted form JSON. + + Example output: + {"json_path": "form.weight", "type": "Int", "label": "Weight (grams)"} + {"json_path": "form.child_info.birth_weight", "type": "Decimal", "label": "Birth Weight"} + + Use the json_path values directly in PIPELINE_SCHEMAS field definitions: + {"name": "weight", "path": "form.weight", "transform": "float"} + + Args: + app_id: The CommCare application ID + xmlns: The form's xmlns identifier (from get_app_structure) + domain: CommCare domain name (optional) + """ + from tools.commcare_mcp.extractors import extract_form_json_paths + from tools.commcare_mcp.hq_client import get_app + + try: + app = await get_app(domain or None, app_id) + result = extract_form_json_paths(app, xmlns) + if result is None: + return {"error": f"Form with xmlns '{xmlns}' not found in app {app_id}"} + return result + except Exception as e: + return {"error": str(e)} + + +if __name__ == "__main__": + mcp.run(transport="stdio") +``` + +**Step 2: Verify it starts without errors (will fail on missing env vars, that's OK)** + +Run: `python tools/commcare_mcp/server.py --help 2>&1 || echo "Server module loads"` +Expected: Server loads (may 
error on missing stdin transport when not run by MCP client, that's fine) + +**Step 3: Commit** + +```bash +git add tools/commcare_mcp/server.py +git commit -m "feat: implement CommCare HQ MCP server with 4 tools and 3 resources" +``` + +--- + +### Task 6: Configure Claude Code MCP integration + +**Files:** +- Modify: `.claude/mcp.json` (create if doesn't exist) + +**Step 1: Check if .claude/mcp.json exists** + +Run: `cat .claude/mcp.json 2>/dev/null || echo "Does not exist"` + +**Step 2: Add or update the MCP server configuration** + +Add the `commcare-hq` server entry. The env vars should use placeholder values — the user will fill in their real API key. + +`.claude/mcp.json`: +```json +{ + "mcpServers": { + "commcare-hq": { + "command": "python", + "args": ["tools/commcare_mcp/server.py"], + "env": { + "COMMCARE_HQ_DOMAIN": "", + "COMMCARE_HQ_API_KEY": "", + "COMMCARE_HQ_URL": "https://www.commcarehq.org" + } + } + } +} +``` + +Note: If `.claude/mcp.json` already exists with other servers (like Sentry), merge this entry into the existing `mcpServers` object. + +**Step 3: Add `.claude/mcp.json` to `.gitignore` if not already there** + +This file contains API keys and should NOT be committed. 
Check if it's already gitignored: + +Run: `grep -q "mcp.json" .gitignore && echo "Already ignored" || echo ".claude/mcp.json" >> .gitignore` + +**Step 4: Commit the gitignore update (if changed)** + +```bash +git add .gitignore +git commit -m "chore: gitignore .claude/mcp.json (contains API keys)" +``` + +--- + +### Task 7: Test the server end-to-end with real data + +**Files:** None (manual testing) + +**Step 1: Configure the MCP server with real credentials** + +Edit `.claude/mcp.json` and fill in: +- `COMMCARE_HQ_DOMAIN`: The domain for the KMC opportunity +- `COMMCARE_HQ_API_KEY`: User's API key in `email:key` format + +**Step 2: Test the server directly via Python** + +Run a quick smoke test that exercises the HQ client and extractors: + +```bash +COMMCARE_HQ_DOMAIN=your-domain COMMCARE_HQ_API_KEY="user@email.com:key" python -c " +import asyncio +import sys +sys.path.insert(0, 'tools/commcare_mcp') +from hq_client import list_apps +apps = asyncio.run(list_apps()) +print(f'Found {len(apps)} apps') +for app in apps[:5]: + print(f' {app[\"name\"]} (id={app[\"id\"]}, {app[\"form_count\"]} forms)') +" +``` + +Expected: List of apps from the domain. + +**Step 3: Test form JSON path extraction** + +Once you have an app_id from Step 2, test the path extraction: + +```bash +COMMCARE_HQ_DOMAIN=your-domain COMMCARE_HQ_API_KEY="user@email.com:key" python -c " +import asyncio, json, sys +sys.path.insert(0, 'tools/commcare_mcp') +from hq_client import get_app +from extractors import extract_app_structure, extract_form_json_paths + +app = asyncio.run(get_app(None, 'YOUR_APP_ID')) +structure = extract_app_structure(app) +print(json.dumps(structure, indent=2)[:2000]) + +# Get paths for the first form +first_xmlns = structure['modules'][0]['forms'][0]['xmlns'] +paths = extract_form_json_paths(app, first_xmlns) +print(json.dumps(paths, indent=2)[:2000]) +" +``` + +Expected: App structure tree and form JSON path mappings. 
+ +**Step 4: Restart Claude Code to pick up the new MCP server** + +After configuring `.claude/mcp.json`, restart the Claude Code session. The server should appear in the available tools. Test by asking Claude to `list_apps` or `get_form_json_paths`. + +--- + +### Task 8: Fix the server module import paths + +**Files:** +- Possibly modify: `tools/commcare_mcp/server.py` + +The import paths in `server.py` use `from tools.commcare_mcp.hq_client import ...` which assumes the working directory is the repo root. This may need adjustment depending on how Claude Code launches MCP servers. + +**Step 1: Test the import path** + +Run from repo root: +```bash +python -c "import sys; sys.path.insert(0, '.'); from tools.commcare_mcp.hq_client import list_apps; print('OK')" +``` + +If this fails, the server.py imports need to use relative imports instead: +```python +# Change from: +from tools.commcare_mcp.hq_client import list_apps as _list_apps +# To: +from hq_client import list_apps as _list_apps +``` + +**Step 2: Verify the server runs from repo root** + +```bash +cd /path/to/commcare-connect +python tools/commcare_mcp/server.py +``` + +**Step 3: Fix imports if needed and commit** + +```bash +git add tools/commcare_mcp/server.py +git commit -m "fix: adjust import paths for MCP server" +``` diff --git a/docs/plans/2026-03-07-kmc-flw-flags-design.md b/docs/plans/2026-03-07-kmc-flw-flags-design.md new file mode 100644 index 000000000..5930783a7 --- /dev/null +++ b/docs/plans/2026-03-07-kmc-flw-flags-design.md @@ -0,0 +1,213 @@ +# KMC FLW Flag Report Template — Design + +**Date:** 2026-03-07 +**Status:** Approved +**Template key:** `kmc_flw_flags` + +## Purpose + +Replicate the KMC FLW Flag Report (PDF) as an interactive workflow template. Identifies FLWs with concerning performance patterns across 8 flags in three domains (case management, danger signs, weight tracking), then enables targeted audit creation with AI review for selected FLWs. 
+ +## Architecture + +### Data Flow + +``` +CommCare Forms (Registration + Visit) + ↓ +Pipeline "flw_flags" (aggregated, GROUP BY username) + → SQL: case counts, mortality, enrollment timing, danger sign rates +Pipeline "weight_series" (visit-level) + → Per-visit: username, case_id, visit_date, weight + ↓ +React RENDER_CODE + → Merges: flw_flags (pre-computed) + weight_series (client-side weight pair analysis) + → Applies hardcoded thresholds → 8 boolean flags per FLW + → Renders: KPI cards + sortable flag table + checkbox selection + ↓ +User selects flagged FLWs → "Create Audits" + ↓ +actions.createAudit() → per-FLW sessions, last week, weight image filter, scale_validation AI +``` + +### Why Two Pipelines + +The `flw_flags` pipeline handles the 5 flags computable with standard SQL aggregation. Weight pair flags (wt_loss, wt_gain, wt_zero) require comparing consecutive visits per child (window functions), which the pipeline SQL builder doesn't yet support. `weight_series` provides raw visit-level weight data for client-side pair analysis. 
+ +## Pipeline #1: `flw_flags` (aggregated) + +- **alias:** `flw_flags` +- **data_source:** `connect_csv` +- **grouping_key:** `username` +- **terminal_stage:** `aggregated` + +### Fields + +| Field | Aggregation | Source Path | Purpose | +|-------|-------------|-------------|---------| +| `total_cases` | count_distinct | `form.kmc_beneficiary_case_id` | Min case filter | +| `closed_cases` | filtered count_distinct | case_id WHERE case closed | Mortality denominator | +| `deaths` | filtered count_distinct | case_id WHERE child_alive='no' | Mortality numerator | +| `total_visits` | count | visit forms only | Visits flag | +| `avg_visits_per_case` | subquery | visits / distinct closed cases (non-mortality, last 50) | flag_visits | +| `pct_single_visit` | subquery | cases with exactly 1 visit / total cases | Display metric | +| `mortality_rate` | computed | deaths / closed_cases | flag_mort_low, flag_mort_high | +| `pct_8plus_days` | subquery | cases where reg_date - discharge_date >= 8 / total | flag_enroll | +| `danger_visit_count` | count | visit forms with danger_sign_positive field | flag_danger min threshold | +| `danger_positive_count` | filtered count | WHERE danger_sign_positive='yes' | flag_danger_high | +| `pct_danger_positive` | computed | danger_positive_count / danger_visit_count | flag_danger_high | + +### Key CommCare Form Paths + +**Registration form** (`58991FD0-F6A7-4DA2-8C74-AE4655A424A7`): +- `form.hosp_lbl.date_hospital_discharge` — Hospital discharge date +- `form.reg_date` — Registration date +- `form.case_close_condition` — Case close condition +- `form.child_alive` — Is baby alive (registration-time) + +**Visit form** (`42DFAFE1-C3B5-4F11-A400-827DA369F2C9`): +- `form.anthropometric.child_weight_visit` — Weight in grams +- `form.danger_signs_checklist.danger_sign_positive` — Computed danger sign flag +- `form.child_alive` — Is baby alive (visit-time) +- `form.kmc_beneficiary_case_id` — Links visit to beneficiary case +- 
`form.grp_kmc_visit.visit_number` — Visit number +- `form.grp_kmc_visit.visit_date` — Visit date +- `form.grp_kmc_beneficiary.reg_date` — Registration date (from case) + +## Pipeline #2: `weight_series` (visit-level) + +- **alias:** `weight_series` +- **data_source:** `connect_csv` +- **grouping_key:** `username` +- **terminal_stage:** `visit_level` + +### Fields + +| Field | Path | Transform | +|-------|------|-----------| +| `username` | (built-in) | — | +| `beneficiary_case_id` | `form.kmc_beneficiary_case_id` | — | +| `visit_date` | `form.grp_kmc_visit.visit_date` | date | +| `weight` | `form.anthropometric.child_weight_visit` | float | + +Client-side computation in React: +1. Group by FLW → by child → sort by date +2. For each consecutive pair: compute weight_diff and days_diff +3. Aggregate per FLW: pct_wt_loss, mean_daily_gain, pct_wt_zero + +## UI Layout + +### KPI Cards (top row) + +``` +┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ +│ 40 FLWs │ │ 11 Flagged │ │ 34 Excluded │ │ 3,471 Cases │ +│ Analyzed │ │ (2+ flags) │ │ (<20 cases) │ │ Total │ +└──────────────┘ └──────────────┘ └──────────────┘ └──────────────┘ +``` + +Cards use the same style as KMC longitudinal template (colored borders, bold numbers). 
+ +### Filter Bar + +``` +[All FLWs ▾] [Flagged Only] [2+ Flags] [Search: ___________] +``` + +### Flag Table + +``` +☑ │ FLW │ Cases │ Avg Vis │ Mort% │ 8+Days │ Danger │ Wt Loss │ Gain │ Wt Zero │ Flags +──┼────────┼───────┼─────────┼────────┼────────┼────────┼─────────┼───────┼─────────┼────── +☑ │ 2935 │ 23 │ 2.65* │ 0.0%* │ NE │ 0.0%* │ NE │ NE │ NE │ 3 +☐ │ 2198 │ 38 │ 1.83* │ 0.0%* │ 80.0%* │ 10.0% │ NE │ NE │ NE │ 3 +``` + +- Flagged cells: red/pink background (matching PDF) +- "NE" when min cases not met +- Sortable by any column (default: flags descending) +- Select-all checkbox in header +- Row highlight on selection + +### Action Bar (sticky bottom) + +``` +┌─────────────────────────────────────────────────────────┐ +│ 2 FLWs selected │ [Create Audits with AI Review] │ +│ │ Last week · Weight images · AI │ +└─────────────────────────────────────────────────────────┘ +``` + +## Flag Thresholds (hardcoded) + +```javascript +const THRESHOLDS = { + visits: 3.0, // avg visits < 3.0 + mort_low: 0.02, // mortality < 2% + mort_high: 0.20, // mortality > 20% + enroll: 0.35, // 8+ days enrollment > 35% + danger_high: 0.30, // danger sign positive > 30% + danger_zero_min: 30, // zero across 30+ visits + wt_loss: 0.15, // weight loss pairs > 15% + wt_gain: 60, // daily gain > 60 g/day + wt_zero: 0.30, // zero change pairs > 30% +}; + +const MIN_CASES = { + visits: 10, // 10 closed cases + mortality: 20, // 20 closed cases + enroll: 10, // 10 enrollment records + danger: 20, // 20 followup visits + danger_zero: 30, // 30 followup visits + weight: 10, // 10 weight pairs +}; +``` + +## Audit Creation + +When user clicks "Create Audits with AI Review": + +1. 
Calls `actions.createAudit()` with: + - `opportunities: [{ id: instance.opportunity_id }]` + - `criteria.audit_type: 'date_range'` + - `criteria.granularity: 'per_flw'` + - `criteria.start_date / end_date`: last Monday–Sunday + - `criteria.related_fields`: weight image + reading (same as weekly audit) + - `ai_agent_id: 'scale_validation'` + - `workflow_run_id: instance.id` + - `selected_flw_user_ids`: usernames from checked rows +2. Streams progress via `actions.streamAuditProgress()` +3. Shows linked audit sessions table below the flag table when complete + +## Template Structure + +Single file: `commcare_connect/workflow/templates/kmc_flw_flags.py` + +```python +TEMPLATE = { + "key": "kmc_flw_flags", + "name": "KMC FLW Flag Report", + "description": "Identifies FLWs with concerning performance patterns. Select flagged FLWs to create targeted audits with AI review.", + "icon": "fa-flag", + "color": "red", + "definition": DEFINITION, + "render_code": RENDER_CODE, + "pipeline_schemas": PIPELINE_SCHEMAS, +} +``` + +## Pipeline Extensions Required + +The current pipeline SQL builder may need extensions for: +1. `count_distinct` aggregation type (currently defaults to MIN) +2. Subqueries for per-case metrics aggregated to per-FLW level +3. Filtered count_distinct (cases WHERE condition) + +These are additive changes to `query_builder.py` that don't break existing pipelines. 
+ +## Out of Scope + +- Multi-opportunity support (use separate workflow runs per opportunity) +- Configurable thresholds UI (thresholds are constants, edit template to change) +- Historical flag trend tracking +- Export to PDF diff --git a/docs/plans/2026-03-07-kmc-flw-flags-plan.md b/docs/plans/2026-03-07-kmc-flw-flags-plan.md new file mode 100644 index 000000000..6ab2dd075 --- /dev/null +++ b/docs/plans/2026-03-07-kmc-flw-flags-plan.md @@ -0,0 +1,1196 @@ +# KMC FLW Flag Report Template — Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Build a workflow template that computes 8 FLW performance flags from CommCare data and enables targeted audit creation with AI review for selected FLWs. + +**Architecture:** Two-pipeline template (`kmc_flw_flags.py`). Pipeline #1 (`flw_flags`, aggregated) computes per-FLW metrics via SQL GROUP BY. Pipeline #2 (`weight_series`, visit-level) provides raw weight data for client-side consecutive-pair analysis. React renders KPI cards + sortable flag table with checkboxes. Audit creation reuses the existing `actions.createAudit()` flow. + +**Tech Stack:** Python (pipeline config), PostgreSQL (SQL aggregation), React/JSX (RENDER_CODE via Babel), Playwright (E2E testing) + +**Design doc:** `docs/plans/2026-03-07-kmc-flw-flags-design.md` + +--- + +## Task 1: Add `count_distinct` aggregation to pipeline query builder + +The pipeline needs `COUNT(DISTINCT value)` for counting unique beneficiary cases per FLW. Currently `count_unique` falls through to `MIN()`. 
+ +**Files:** +- Modify: `commcare_connect/labs/analysis/backends/sql/query_builder.py:167-193` +- Test: `commcare_connect/labs/tests/test_query_builder.py` (create) + +**Step 1: Write failing test** + +```python +# commcare_connect/labs/tests/test_query_builder.py +import pytest + +from commcare_connect.labs.analysis.backends.sql.query_builder import _aggregation_to_sql + + +class TestAggregationToSQL: + def test_count_distinct(self): + result = _aggregation_to_sql("count_distinct", "beneficiary_case_id", "total_cases") + assert "COUNT(DISTINCT" in result + assert "beneficiary_case_id" in result + + def test_count(self): + result = _aggregation_to_sql("count", "visit_id", "total_visits") + assert result == "COUNT(visit_id)" + + def test_first_uses_subquery(self): + result = _aggregation_to_sql("first", "weight", "first_weight") + assert "ORDER BY visit_date ASC" in result + assert "LIMIT 1" in result + + def test_unknown_falls_to_min(self): + result = _aggregation_to_sql("bogus", "val", "field") + assert result == "MIN(val)" +``` + +**Step 2: Run test to verify it fails** + +Run: `pytest commcare_connect/labs/tests/test_query_builder.py -v --ds=config.settings.local -o "addopts="` +Expected: `test_count_distinct` FAILS (count_unique falls through to MIN) + +**Step 3: Implement count_distinct** + +In `query_builder.py`, add before the `else: return MIN` fallback (around line 190): + +```python + elif agg == "count_distinct" or agg == "count_unique": + return f"COUNT(DISTINCT {value_expr})" +``` + +Also add `"last"` while we're here (mirror of "first" with DESC): + +```python + elif agg == "last": + return f"""( + SELECT sub.val FROM ( + SELECT {value_expr} as val, visit_date + FROM labs_raw_visit_cache sub + WHERE sub.opportunity_id = labs_raw_visit_cache.opportunity_id + AND sub.username = labs_raw_visit_cache.username + AND {value_expr} IS NOT NULL + ORDER BY visit_date DESC + LIMIT 1 + ) sub + )""" +``` + +**Step 4: Run tests to verify they pass** + +Run: 
`pytest commcare_connect/labs/tests/test_query_builder.py -v --ds=config.settings.local -o "addopts="` +Expected: ALL PASS + +**Step 5: Commit** + +```bash +git add commcare_connect/labs/tests/test_query_builder.py commcare_connect/labs/analysis/backends/sql/query_builder.py +git commit -m "feat: add count_distinct and last aggregation types to pipeline query builder" +``` + +--- + +## Task 2: Add per-field filter support for conditional aggregation + +Several flag metrics require conditional counting (e.g., count distinct cases WHERE child_alive='no'). Add an optional `filter` on FieldComputation that generates PostgreSQL `FILTER (WHERE ...)` clauses. + +**Files:** +- Modify: `commcare_connect/labs/analysis/config.py:62-145` (FieldComputation) +- Modify: `commcare_connect/labs/analysis/backends/sql/query_builder.py:167-193` +- Modify: `commcare_connect/workflow/data_access.py:1709-1721` (_schema_to_config field parsing) +- Test: `commcare_connect/labs/tests/test_query_builder.py` (extend) + +**Step 1: Write failing test** + +Add to `test_query_builder.py`: + +```python +from commcare_connect.labs.analysis.config import FieldComputation + + +class TestFilteredAggregation: + def test_count_distinct_with_filter(self): + """COUNT(DISTINCT case_id) FILTER (WHERE child_alive = 'no')""" + field = FieldComputation( + name="deaths", + path="form.kmc_beneficiary_case_id", + aggregation="count_distinct", + filter_path="form.child_alive", + filter_value="no", + ) + result = _aggregation_to_sql( + field.aggregation, + "COALESCE(form_json->'form'->>'kmc_beneficiary_case_id', '')", + field.name, + filter_path=field.filter_path, + filter_value=field.filter_value, + ) + assert "FILTER" in result + assert "child_alive" in result + + def test_count_without_filter(self): + result = _aggregation_to_sql("count", "val", "field") + assert "FILTER" not in result +``` + +**Step 2: Run test to verify it fails** + +Run: `pytest 
commcare_connect/labs/tests/test_query_builder.py::TestFilteredAggregation -v --ds=config.settings.local -o "addopts="` +Expected: FAIL — FieldComputation doesn't accept `filter_path` + +**Step 3: Add filter fields to FieldComputation** + +In `config.py`, add to FieldComputation class (after `extractor` field, around line 115): + +```python + filter_path: str = "" # Optional: path for FILTER (WHERE ...) clause + filter_value: str = "" # Optional: value to compare against in filter +``` + +No changes needed to `__post_init__` validation — these are optional. + +**Step 4: Update _aggregation_to_sql signature** + +In `query_builder.py`, update the function signature and add FILTER clause generation: + +```python +def _aggregation_to_sql( + agg: str, + value_expr: str, + field_name: str, + filter_path: str = "", + filter_value: str = "", +) -> str: + """Convert aggregation type to SQL aggregate function.""" + # Build the base aggregation + if agg == "count": + base = f"COUNT({value_expr})" + elif agg == "sum": + base = f"SUM({value_expr})" + elif agg == "avg": + base = f"AVG({value_expr})" + elif agg == "count_distinct" or agg == "count_unique": + base = f"COUNT(DISTINCT {value_expr})" + elif agg == "first": + # Subquery — filters not applicable + return f"""( + SELECT sub.val FROM ( + SELECT {value_expr} as val, visit_date + FROM labs_raw_visit_cache sub + WHERE sub.opportunity_id = labs_raw_visit_cache.opportunity_id + AND sub.username = labs_raw_visit_cache.username + AND {value_expr} IS NOT NULL + ORDER BY visit_date ASC + LIMIT 1 + ) sub + )""" + elif agg == "last": + return f"""( + SELECT sub.val FROM ( + SELECT {value_expr} as val, visit_date + FROM labs_raw_visit_cache sub + WHERE sub.opportunity_id = labs_raw_visit_cache.opportunity_id + AND sub.username = labs_raw_visit_cache.username + AND {value_expr} IS NOT NULL + ORDER BY visit_date DESC + LIMIT 1 + ) sub + )""" + elif agg == "list": + base = f"ARRAY_AGG({value_expr}) FILTER (WHERE {value_expr} IS NOT 
NULL)" + # list already has its own FILTER, so skip adding another + return base + elif agg == "min": + base = f"MIN({value_expr})" + elif agg == "max": + base = f"MAX({value_expr})" + else: + base = f"MIN({value_expr})" + + # Apply optional FILTER clause + if filter_path and filter_value: + filter_sql = _jsonb_path_to_sql(filter_path) + base = f"{base} FILTER (WHERE {filter_sql} = '{filter_value}')" + + return base +``` + +**Step 5: Update callers of _aggregation_to_sql** + +In `query_builder.py`, find where `_aggregation_to_sql` is called (in `build_flw_aggregation_query` or similar) and pass the filter params from the FieldComputation: + +```python +# Where fields are iterated to build SELECT expressions: +agg_sql = _aggregation_to_sql( + field.aggregation, + value_expr, + field.name, + filter_path=field.filter_path, + filter_value=field.filter_value, +) +``` + +**Step 6: Update _schema_to_config in data_access.py** + +In `workflow/data_access.py` around line 1709-1721, add filter_path and filter_value to the FieldComputation constructor: + +```python +fields.append( + FieldComputation( + name=field_def["name"], + path=field_def.get("path", ""), + paths=field_def.get("paths"), + aggregation=field_def.get("aggregation", "first"), + transform=get_transform(field_def.get("transform")), + description=field_def.get("description", ""), + default=field_def.get("default"), + filter_path=field_def.get("filter_path", ""), + filter_value=field_def.get("filter_value", ""), + ) +) +``` + +**Step 7: Run tests** + +Run: `pytest commcare_connect/labs/tests/test_query_builder.py -v --ds=config.settings.local -o "addopts="` +Expected: ALL PASS + +**Step 8: Commit** + +```bash +git add commcare_connect/labs/analysis/config.py commcare_connect/labs/analysis/backends/sql/query_builder.py commcare_connect/workflow/data_access.py commcare_connect/labs/tests/test_query_builder.py +git commit -m "feat: add per-field filter support for conditional SQL aggregation" +``` + +--- + +## Task 
3: Create template skeleton with PIPELINE_SCHEMAS + +Create the template file with pipeline definitions and basic structure. + +**Files:** +- Create: `commcare_connect/workflow/templates/kmc_flw_flags.py` + +**Step 1: Create the template file** + +```python +# commcare_connect/workflow/templates/kmc_flw_flags.py +""" +KMC FLW Flag Report — identifies FLWs with concerning performance patterns +across 8 flags in three domains: case management, danger signs, weight tracking. +Enables targeted audit creation with AI review for selected FLWs. + +Reference: KMC_FLW_Flag_Report_Full.pdf (2026-03-07) +Design: docs/plans/2026-03-07-kmc-flw-flags-design.md +""" + +# --------------------------------------------------------------------------- +# Pipeline Schemas +# --------------------------------------------------------------------------- + +PIPELINE_SCHEMAS = [ + # Pipeline 1: FLW-level aggregated metrics + { + "alias": "flw_flags", + "name": "FLW Flag Metrics", + "description": "Per-FLW aggregated metrics for flag computation", + "schema": { + "data_source": {"type": "connect_csv"}, + "grouping_key": "username", + "terminal_stage": "aggregated", + "fields": [ + # --- Case management metrics --- + { + "name": "total_cases", + "paths": ["form.kmc_beneficiary_case_id", "form.case.@case_id"], + "aggregation": "count_distinct", + "description": "Total unique beneficiary cases", + }, + { + "name": "closed_cases", + "paths": ["form.kmc_beneficiary_case_id", "form.case.@case_id"], + "aggregation": "count_distinct", + "filter_path": "form.case_close_condition", + "filter_value": "closed", + "description": "Unique cases that have been closed", + }, + { + "name": "deaths", + "paths": ["form.kmc_beneficiary_case_id", "form.case.@case_id"], + "aggregation": "count_distinct", + "filter_path": "form.child_alive", + "filter_value": "no", + "description": "Unique cases where child died", + }, + { + "name": "total_visits", + "path": "form.grp_kmc_visit.visit_number", + "aggregation": 
"count", + "description": "Total visit form submissions", + }, + # --- Danger sign metrics --- + { + "name": "danger_visit_count", + "path": "form.danger_signs_checklist.danger_sign_positive", + "aggregation": "count", + "description": "Total visits with danger sign assessment", + }, + { + "name": "danger_positive_count", + "path": "form.danger_signs_checklist.danger_sign_positive", + "aggregation": "count", + "filter_path": "form.danger_signs_checklist.danger_sign_positive", + "filter_value": "yes", + "description": "Visits where danger sign was positive", + }, + # --- Enrollment timing --- + { + "name": "reg_date", + "paths": ["form.reg_date", "form.grp_kmc_beneficiary.reg_date"], + "aggregation": "first", + "description": "Registration date (for enrollment timing)", + }, + { + "name": "discharge_date", + "path": "form.hosp_lbl.date_hospital_discharge", + "aggregation": "first", + "description": "Hospital discharge date", + }, + ], + }, + }, + # Pipeline 2: Visit-level weight data for consecutive pair analysis + { + "alias": "weight_series", + "name": "Weight Series", + "description": "Per-visit weight measurements for weight flag computation", + "schema": { + "data_source": {"type": "connect_csv"}, + "grouping_key": "username", + "terminal_stage": "visit_level", + "linking_field": "beneficiary_case_id", + "fields": [ + { + "name": "beneficiary_case_id", + "paths": ["form.kmc_beneficiary_case_id", "form.case.@case_id"], + "aggregation": "first", + }, + { + "name": "visit_date", + "paths": ["form.grp_kmc_visit.visit_date"], + "aggregation": "first", + "transform": "date", + }, + { + "name": "weight", + "paths": [ + "form.anthropometric.child_weight_visit", + "form.child_details.birth_weight_reg.child_weight_reg", + ], + "aggregation": "first", + "transform": "float", + }, + { + "name": "visit_number", + "paths": ["form.grp_kmc_visit.visit_number"], + "aggregation": "first", + "transform": "int", + }, + ], + }, + }, +] + + +# 
--------------------------------------------------------------------------- +# Definition +# --------------------------------------------------------------------------- + +DEFINITION = { + "name": "KMC FLW Flag Report", + "description": "Identifies FLWs with concerning performance patterns across case management, danger signs, and weight tracking. Select flagged FLWs to create targeted audits with AI review.", + "version": 1, + "templateType": "kmc_flw_flags", + "statuses": [ + {"id": "pending", "label": "Pending Review", "color": "gray"}, + {"id": "audits_created", "label": "Audits Created", "color": "green"}, + ], + "config": { + "showSummaryCards": False, + "showFilters": False, + }, + "pipeline_sources": [], +} + + +# --------------------------------------------------------------------------- +# Render Code (React JSX) +# --------------------------------------------------------------------------- + +RENDER_CODE = """ +// PLACEHOLDER — implemented in Task 4-7 +function WorkflowUI({ definition, instance, workers, pipelines, links, actions, onUpdateState }) { + return React.createElement('div', {className: 'p-4'}, + React.createElement('p', null, 'KMC FLW Flag Report — loading...') + ); +} +""" + + +# --------------------------------------------------------------------------- +# Template Export +# --------------------------------------------------------------------------- + +TEMPLATE = { + "key": "kmc_flw_flags", + "name": "KMC FLW Flag Report", + "description": "Identifies FLWs with concerning performance patterns. 
Select flagged FLWs to create targeted audits with AI review.", + "icon": "fa-flag", + "color": "red", + "definition": DEFINITION, + "render_code": RENDER_CODE, + "pipeline_schemas": PIPELINE_SCHEMAS, +} +``` + +**Step 2: Verify template auto-discovery** + +Run Python to check the template is discovered: + +```bash +python -c "from commcare_connect.workflow.templates import list_templates; print([t['key'] for t in list_templates()])" +``` + +Expected: List includes `'kmc_flw_flags'` + +**Step 3: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py +git commit -m "feat: add kmc_flw_flags template skeleton with pipeline schemas" +``` + +--- + +## Task 4: Implement RENDER_CODE — data processing and flag computation + +Write the core data processing functions that merge both pipeline results and compute all 8 flags. + +**Files:** +- Modify: `commcare_connect/workflow/templates/kmc_flw_flags.py` (RENDER_CODE) + +**Step 1: Replace RENDER_CODE placeholder** + +Replace the RENDER_CODE string with the full JSX implementation. The code below shows the complete data processing section. The UI sections follow in Tasks 5-7. 
+ +```python +RENDER_CODE = r""" +function WorkflowUI({ definition, instance, workers, pipelines, links, actions, onUpdateState }) { + + // ── Thresholds (matching KMC_FLW_Flag_Report_Full.pdf) ────────── + const THRESHOLDS = { + visits: 3.0, + mort_low: 0.02, + mort_high: 0.20, + enroll: 0.35, + danger_high: 0.30, + danger_zero_min_visits: 30, + wt_loss: 0.15, + wt_gain: 60, + wt_zero: 0.30, + }; + const MIN_CASES = { + visits: 10, + mortality: 20, + enroll: 10, + danger: 20, + danger_zero: 30, + weight: 10, + }; + + // ── State ─────────────────────────────────────────────────────── + const [selectedWorkers, setSelectedWorkers] = React.useState({}); + const [selectAll, setSelectAll] = React.useState(false); + const [filter, setFilter] = React.useState('all'); + const [sortKey, setSortKey] = React.useState('flag_count'); + const [sortAsc, setSortAsc] = React.useState(false); + const [search, setSearch] = React.useState(''); + const [isRunning, setIsRunning] = React.useState(false); + const [progress, setProgress] = React.useState(null); + const [linkedSessions, setLinkedSessions] = React.useState([]); + const cleanupRef = React.useRef(null); + + // ── Extract pipeline data ─────────────────────────────────────── + const flwRows = pipelines && pipelines.flw_flags ? (pipelines.flw_flags.rows || []) : []; + const weightRows = pipelines && pipelines.weight_series ? 
(pipelines.weight_series.rows || []) : []; + + // ── Weight pair analysis (client-side) ────────────────────────── + const computeWeightMetrics = React.useCallback((username, weightData) => { + // Filter to this FLW's visits with valid weight + const flwVisits = weightData + .filter(v => v.username === username && v.weight && parseFloat(v.weight) > 0) + .sort((a, b) => { + const da = new Date(a.visit_date); + const db = new Date(b.visit_date); + return da - db; + }); + + // Group by beneficiary + const byChild = {}; + flwVisits.forEach(v => { + const cid = v.beneficiary_case_id; + if (!cid) return; + if (!byChild[cid]) byChild[cid] = []; + byChild[cid].push(v); + }); + + let totalPairs = 0; + let lossPairs = 0; + let zeroPairs = 0; + let totalDailyGain = 0; + let gainPairCount = 0; + + Object.values(byChild).forEach(visits => { + for (let i = 1; i < visits.length; i++) { + const prev = parseFloat(visits[i - 1].weight); + const curr = parseFloat(visits[i].weight); + const prevDate = new Date(visits[i - 1].visit_date); + const currDate = new Date(visits[i].visit_date); + const daysDiff = (currDate - prevDate) / (1000 * 60 * 60 * 24); + + if (isNaN(prev) || isNaN(curr) || prev <= 0 || curr <= 0) continue; + if (daysDiff <= 0) continue; + + totalPairs++; + const weightDiff = curr - prev; + + if (weightDiff < 0) lossPairs++; + if (weightDiff === 0) zeroPairs++; + if (daysDiff > 0) { + totalDailyGain += weightDiff / daysDiff; + gainPairCount++; + } + } + }); + + return { + total_weight_pairs: totalPairs, + pct_wt_loss: totalPairs >= MIN_CASES.weight ? lossPairs / totalPairs : null, + mean_daily_gain: gainPairCount >= MIN_CASES.weight ? totalDailyGain / gainPairCount : null, + pct_wt_zero: totalPairs >= MIN_CASES.weight ? 
zeroPairs / totalPairs : null, + }; + }, [weightRows]); + + // ── Merge pipeline data + compute flags ───────────────────────── + const flwData = React.useMemo(() => { + return flwRows.map(row => { + const totalCases = parseInt(row.total_cases) || 0; + const closedCases = parseInt(row.closed_cases) || 0; + const deaths = parseInt(row.deaths) || 0; + const totalVisits = parseInt(row.total_visits) || 0; + const dangerVisitCount = parseInt(row.danger_visit_count) || 0; + const dangerPositiveCount = parseInt(row.danger_positive_count) || 0; + + // Avg visits per closed case + const avgVisits = closedCases > 0 ? totalVisits / closedCases : null; + // Mortality rate + const mortRate = closedCases > 0 ? deaths / closedCases : null; + // Danger sign rate + const pctDanger = dangerVisitCount > 0 ? dangerPositiveCount / dangerVisitCount : null; + + // Weight metrics (from visit-level pipeline) + const wm = computeWeightMetrics(row.username, weightRows); + + // Enrollment timing — TODO: requires per-case reg_date vs discharge_date + // For now compute from available data + const pct8PlusDays = null; // Will be computed when enrollment pipeline is refined + + // ── Flag computation ──────────────────────────────────── + const excluded = totalCases < 20; + const flags = {}; + + // flag_visits: avg visits < 3.0 (min 10 closed cases) + flags.visits = !excluded && closedCases >= MIN_CASES.visits && avgVisits !== null && avgVisits < THRESHOLDS.visits; + // flag_mort_low: mortality < 2% (min 20 closed cases) + flags.mort_low = !excluded && closedCases >= MIN_CASES.mortality && mortRate !== null && mortRate < THRESHOLDS.mort_low; + // flag_mort_high: mortality > 20% (min 20 closed cases) + flags.mort_high = !excluded && closedCases >= MIN_CASES.mortality && mortRate !== null && mortRate > THRESHOLDS.mort_high; + // flag_enroll: >35% enrolled 8+ days after discharge (min 10 records) + flags.enroll = !excluded && pct8PlusDays !== null && pct8PlusDays > THRESHOLDS.enroll; + // 
flag_danger_high: >30% danger sign positive (min 20 visits) + flags.danger_high = !excluded && dangerVisitCount >= MIN_CASES.danger && pctDanger !== null && pctDanger > THRESHOLDS.danger_high; + // flag_danger_zero: zero danger signs across 30+ visits + flags.danger_zero = !excluded && dangerVisitCount >= MIN_CASES.danger_zero && dangerPositiveCount === 0; + // flag_wt_loss: >15% weight loss pairs (min 10 pairs) + flags.wt_loss = !excluded && wm.pct_wt_loss !== null && wm.pct_wt_loss > THRESHOLDS.wt_loss; + // flag_wt_gain: mean daily gain > 60 g/day (min 10 pairs) + flags.wt_gain = !excluded && wm.mean_daily_gain !== null && wm.mean_daily_gain > THRESHOLDS.wt_gain; + // flag_wt_zero: >30% zero change pairs (min 10 pairs) + flags.wt_zero = !excluded && wm.pct_wt_zero !== null && wm.pct_wt_zero > THRESHOLDS.wt_zero; + + const flagCount = Object.values(flags).filter(Boolean).length; + + return { + username: row.username, + total_cases: totalCases, + closed_cases: closedCases, + deaths: deaths, + total_visits: totalVisits, + avg_visits: avgVisits, + mort_rate: mortRate, + pct_8plus_days: pct8PlusDays, + pct_danger: pctDanger, + danger_visit_count: dangerVisitCount, + danger_positive_count: dangerPositiveCount, + pct_wt_loss: wm.pct_wt_loss, + mean_daily_gain: wm.mean_daily_gain, + pct_wt_zero: wm.pct_wt_zero, + total_weight_pairs: wm.total_weight_pairs, + flags: flags, + flag_count: flagCount, + excluded: excluded, + }; + }); + }, [flwRows, weightRows, computeWeightMetrics]); + + // ── Filter + sort ─────────────────────────────────────────────── + const filteredData = React.useMemo(() => { + let data = flwData.filter(d => !d.excluded); + if (filter === 'flagged') data = data.filter(d => d.flag_count > 0); + if (filter === '2plus') data = data.filter(d => d.flag_count >= 2); + if (search) { + const q = search.toLowerCase(); + data = data.filter(d => d.username.toLowerCase().includes(q)); + } + data.sort((a, b) => { + const av = a[sortKey] ?? 
-Infinity; + const bv = b[sortKey] ?? -Infinity; + return sortAsc ? (av > bv ? 1 : -1) : (av < bv ? 1 : -1); + }); + return data; + }, [flwData, filter, search, sortKey, sortAsc]); + + // ── KPI summaries ─────────────────────────────────────────────── + const kpis = React.useMemo(() => { + const analyzed = flwData.filter(d => !d.excluded); + const excluded = flwData.filter(d => d.excluded); + const flagged = analyzed.filter(d => d.flag_count >= 2); + const totalCases = flwData.reduce((s, d) => s + d.total_cases, 0); + return { + analyzed: analyzed.length, + excluded: excluded.length, + flagged: flagged.length, + totalCases: totalCases, + }; + }, [flwData]); + + // ── Selection helpers ─────────────────────────────────────────── + const toggleWorker = (username) => { + setSelectedWorkers(prev => ({ ...prev, [username]: !prev[username] })); + }; + const handleSelectAll = () => { + const newState = !selectAll; + setSelectAll(newState); + const newSelected = {}; + filteredData.forEach(d => { newSelected[d.username] = newState; }); + setSelectedWorkers(newSelected); + }; + const selectedCount = Object.values(selectedWorkers).filter(Boolean).length; + + // ── Format helpers ────────────────────────────────────────────── + const fmt = (val, type) => { + if (val === null || val === undefined) return 'NE'; + if (type === 'pct') return (val * 100).toFixed(1) + '%'; + if (type === 'dec') return val.toFixed(2); + if (type === 'gain') return val.toFixed(1); + return String(val); + }; + + // ── Audit creation ────────────────────────────────────────────── + const handleCreateAudits = async () => { + const selectedUsernames = Object.entries(selectedWorkers) + .filter(([_, selected]) => selected) + .map(([username]) => username); + + if (selectedUsernames.length === 0) return; + setIsRunning(true); + setProgress({ status: 'starting' }); + + try { + const now = new Date(); + const dayOfWeek = now.getDay(); + const lastMonday = new Date(now); + lastMonday.setDate(now.getDate() - 
dayOfWeek - 6); + const lastSunday = new Date(lastMonday); + lastSunday.setDate(lastMonday.getDate() + 6); + + const startDate = lastMonday.toISOString().split('T')[0]; + const endDate = lastSunday.toISOString().split('T')[0]; + + const result = await actions.createAudit({ + opportunities: [{ id: instance.opportunity_id }], + criteria: { + audit_type: 'date_range', + granularity: 'per_flw', + title: 'FLW Flag Audit - ' + startDate, + start_date: startDate, + end_date: endDate, + related_fields: [{ + imagePath: 'anthropometric/upload_weight_image', + fieldPath: 'child_weight_visit', + label: 'Weight Reading', + filter_by_image: true, + filter_by_field: true, + }], + selected_flw_user_ids: selectedUsernames, + }, + workflow_run_id: instance.id, + ai_agent_id: 'scale_validation', + }); + + if (result && result.task_id) { + const cleanup = actions.streamAuditProgress( + result.task_id, + (progressData) => { setProgress(progressData); }, + (finalResult) => { + setIsRunning(false); + setProgress({ status: 'completed', ...finalResult }); + onUpdateState({ status: 'audits_created' }).catch(() => {}); + fetch('/audit/api/workflow/' + instance.id + '/sessions/') + .then(res => res.json()) + .then(data => { + if (data.success && data.sessions) setLinkedSessions(data.sessions); + }) + .catch(() => {}); + }, + (error) => { + setIsRunning(false); + setProgress({ status: 'failed', error }); + } + ); + cleanupRef.current = cleanup; + } + } catch (err) { + setIsRunning(false); + setProgress({ status: 'failed', error: err.message }); + } + }; + + // ── Load linked sessions on mount ─────────────────────────────── + React.useEffect(() => { + if (instance && instance.id) { + fetch('/audit/api/workflow/' + instance.id + '/sessions/') + .then(res => res.json()) + .then(data => { + if (data.success && data.sessions) setLinkedSessions(data.sessions); + }) + .catch(() => {}); + } + return () => { if (cleanupRef.current) cleanupRef.current(); }; + }, [instance]); + + // ── Render 
────────────────────────────────────────────────────── + if (!pipelines || !pipelines.flw_flags) { + return
Loading pipeline data...
; + } + + const handleSort = (key) => { + if (sortKey === key) { setSortAsc(!sortAsc); } + else { setSortKey(key); setSortAsc(false); } + }; + + const SortIcon = ({ col }) => { + if (sortKey !== col) return ; + return {sortAsc ? '↑' : '↓'}; + }; + + const FlagCell = ({ value, flagged, type }) => { + const bg = flagged ? 'bg-red-100 text-red-800 font-semibold' : ''; + return {fmt(value, type)}; + }; + + return ( +
+ {/* KPI Cards */} +
+
+
{kpis.analyzed}
+
FLWs Analyzed
+
+
+
{kpis.flagged}
+
With 2+ Flags
+
+
+
{kpis.excluded}
+
Excluded (<20 cases)
+
+
+
{kpis.totalCases.toLocaleString()}
+
Total Cases
+
+
+ + {/* Filter Bar */} +
+ {['all', 'flagged', '2plus'].map(f => ( + + ))} + setSearch(e.target.value)} + className="ml-auto px-3 py-1.5 border rounded-lg text-sm w-48" + /> +
+ + {/* Flag Table */} +
+ + + + + + + + + + + + + + + + + + {filteredData.map(d => ( + = 2 ? 'border-l-4 border-red-400' : d.flag_count === 1 ? 'border-l-4 border-orange-300' : '')}> + + + + + + + + + + + + + ))} + {filteredData.length === 0 && ( + + )} + +
+ + handleSort('username')} className="px-3 py-3 text-left text-xs font-medium text-gray-500 uppercase cursor-pointer"> + FLW + handleSort('total_cases')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + Cases + handleSort('avg_visits')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + Avg Vis + handleSort('mort_rate')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + Mort % + handleSort('pct_8plus_days')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + 8+ Days + handleSort('pct_danger')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + Danger + handleSort('pct_wt_loss')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + Wt Loss + handleSort('mean_daily_gain')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + Gain + handleSort('pct_wt_zero')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + Wt Zero + handleSort('flag_count')} className="px-3 py-3 text-right text-xs font-medium text-gray-500 uppercase cursor-pointer"> + Flags +
+ toggleWorker(d.username)} disabled={isRunning} + className="rounded border-gray-300" /> + {d.username}{d.total_cases}= 2 ? 'text-red-700' : d.flag_count === 1 ? 'text-orange-600' : 'text-gray-400')}> + {d.flag_count} +
No FLWs match the current filter
+
+ + {/* Action Bar */} +
+
+ {selectedCount > 0 ? ( + {selectedCount} FLW{selectedCount !== 1 ? 's' : ''} selected + ) : ( + Select FLWs to create audits + )} +
+
+ {progress && progress.status === 'completed' && ( + Audits created successfully + )} + {progress && progress.status === 'failed' && ( + {progress.error || 'Failed'} + )} + {isRunning && progress && ( + + {progress.stage_name || 'Processing...'} + {progress.processed && progress.total ? ` (${progress.processed}/${progress.total})` : ''} + + )} + +
+
+ + {/* Linked Audit Sessions */} + {linkedSessions.length > 0 && ( + + )} +
+ ); +} +""" +``` + +**Step 2: Verify template still loads** + +```bash +python -c "from commcare_connect.workflow.templates import get_template; t = get_template('kmc_flw_flags'); print('OK:', len(t['render_code']), 'chars')" +``` + +Expected: `OK: XXXX chars` + +**Step 3: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py +git commit -m "feat: implement KMC FLW flag report RENDER_CODE with full UI" +``` + +--- + +## Task 5: Add template to __init__.py exports + +**Files:** +- Modify: `commcare_connect/workflow/templates/__init__.py:218-231` + +**Step 1: Add import and __all__ entry** + +The template auto-discovers via `pkgutil`, but the explicit import in `__init__.py` ensures it's in `__all__`: + +Add `kmc_flw_flags` to the import line (~line 218): +```python +from . import audit_with_ai_review, kmc_flw_flags, kmc_longitudinal, mbw_monitoring_v2, ocs_outreach, performance_review +``` + +Add to `__all__` (~line 227): +```python + "kmc_flw_flags", +``` + +**Step 2: Verify** + +```bash +python -c "from commcare_connect.workflow.templates import list_templates; keys = [t['key'] for t in list_templates()]; print(keys); assert 'kmc_flw_flags' in keys" +``` + +Expected: List printed, assertion passes + +**Step 3: Commit** + +```bash +git add commcare_connect/workflow/templates/__init__.py +git commit -m "feat: register kmc_flw_flags template in __init__.py exports" +``` + +--- + +## Task 6: Write E2E test + +**Files:** +- Create: `commcare_connect/workflow/tests/e2e/test_flw_flags_workflow.py` +- Reference: `commcare_connect/workflow/tests/e2e/conftest.py` for fixtures + +**Step 1: Write the E2E test** + +```python +# commcare_connect/workflow/tests/e2e/test_flw_flags_workflow.py +"""E2E test for KMC FLW Flag Report workflow template.""" +import pytest +from playwright.sync_api import expect + + +@pytest.mark.e2e +class TestKMCFLWFlagsWorkflow: + """Test the KMC FLW Flag Report workflow template end-to-end.""" + + def 
test_flag_report_renders(self, auth_page, live_server_url, opportunity_id): + """Create a workflow run and verify the flag report UI renders.""" + page = auth_page + page.set_default_timeout(30000) + + # 1. Navigate to workflow list and create new run + page.goto(f"{live_server_url}/labs/workflow/?opportunity_id={opportunity_id}") + page.wait_for_load_state("networkidle") + + # Click "New Workflow" or similar + new_btn = page.locator("text=New Workflow").first + if new_btn.is_visible(): + new_btn.click() + page.wait_for_load_state("networkidle") + + # Select KMC FLW Flag Report template + template_card = page.locator("text=KMC FLW Flag Report").first + expect(template_card).to_be_visible(timeout=10000) + template_card.click() + + # Confirm creation + create_btn = page.locator("button:has-text('Create')").first + if create_btn.is_visible(): + create_btn.click() + + page.wait_for_load_state("networkidle") + + # 2. Wait for pipeline data to load (can take up to 120s) + page.set_default_timeout(120000) + root = page.locator("#workflow-root") + + # Verify KPI cards render + expect(root.locator("text=FLWs Analyzed")).to_be_visible(timeout=120000) + expect(root.locator("text=Total Cases")).to_be_visible() + + # Verify filter bar renders + expect(root.locator("text=All FLWs")).to_be_visible() + expect(root.locator("text=Any Flag")).to_be_visible() + expect(root.locator("text=2+ Flags")).to_be_visible() + + # Verify table renders with at least a header + expect(root.locator("th:has-text('Avg Vis')")).to_be_visible() + expect(root.locator("th:has-text('Flags')")).to_be_visible() + + # Verify action bar renders + expect(root.locator("text=Create Audits with AI Review")).to_be_visible() + + # 3. 
Cleanup — delete the workflow run + # Get run ID from URL + url = page.url + if '/run/' in url: + run_id = url.split('/run/')[1].rstrip('/') + csrf = page.evaluate("document.querySelector('[name=csrfmiddlewaretoken]')?.value || document.cookie.match(/csrftoken=([^;]+)/)?.[1] || ''") + if csrf and run_id: + page.request.delete( + f"{live_server_url}/labs/workflow/api/run/{run_id}/", + headers={"X-CSRFToken": csrf}, + ) +``` + +**Step 2: Run E2E test** + +```bash +pytest commcare_connect/workflow/tests/e2e/test_flw_flags_workflow.py -v --ds=config.settings.local -o "addopts=" --opportunity-id=874 +``` + +Expected: PASS (flag report renders with KPI cards, table, action bar) + +**Step 3: Commit** + +```bash +git add commcare_connect/workflow/tests/e2e/test_flw_flags_workflow.py +git commit -m "test: add E2E test for KMC FLW Flag Report template" +``` + +--- + +## Task 7: Refine pipeline field paths with MCP verification + +After the template is running, use the CommCare MCP server to verify that the pipeline field paths actually match the form JSON structure. Adjust any paths that return empty data. + +**Files:** +- Modify: `commcare_connect/workflow/templates/kmc_flw_flags.py` (PIPELINE_SCHEMAS fields) + +**Step 1: Run the template and check which fields return data** + +Load the workflow in the browser, open DevTools Console, and check: +```javascript +// In browser console on the workflow page: +console.log('flw_flags:', window.__PIPELINE_DATA__?.flw_flags?.rows?.[0]); +console.log('weight_series:', window.__PIPELINE_DATA__?.weight_series?.rows?.slice(0, 3)); +``` + +**Step 2: Use MCP tools to verify paths if fields are empty** + +Use `get_form_json_paths` for the Registration form (`58991FD0-F6A7-4DA2-8C74-AE4655A424A7`) and Visit form (`42DFAFE1-C3B5-4F11-A400-827DA369F2C9`) on opportunity 874 to find the correct paths. 
+ +Key paths to verify: +- `form.case_close_condition` — may need `form.case_close_check` or `form.grp_kmc_beneficiary.kmc_status` +- `form.child_alive` — exists on both forms +- `form.danger_signs_checklist.danger_sign_positive` — verify exact casing/nesting +- `form.hosp_lbl.date_hospital_discharge` — registration form only + +**Step 3: Update paths and re-test** + +Adjust any incorrect paths in PIPELINE_SCHEMAS. + +**Step 4: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py +git commit -m "fix: correct pipeline field paths based on MCP verification" +``` + +--- + +## Summary + +| Task | Description | Key Files | +|------|-------------|-----------| +| 1 | Add count_distinct aggregation | query_builder.py, test_query_builder.py | +| 2 | Add per-field filter support | config.py, query_builder.py, data_access.py | +| 3 | Template skeleton + PIPELINE_SCHEMAS | kmc_flw_flags.py | +| 4 | Full RENDER_CODE (flags, UI, audit) | kmc_flw_flags.py | +| 5 | Register in __init__.py | __init__.py | +| 6 | E2E test | test_flw_flags_workflow.py | +| 7 | Verify & fix pipeline paths | kmc_flw_flags.py | diff --git a/docs/plans/2026-03-07-kmc-improvements.md b/docs/plans/2026-03-07-kmc-improvements.md new file mode 100644 index 000000000..c6dc4b4a7 --- /dev/null +++ b/docs/plans/2026-03-07-kmc-improvements.md @@ -0,0 +1,119 @@ +# KMC Longitudinal Tracking — Improvement Plan + +**Date:** 2026-03-07 +**Status:** Backlog + +## Context + +Screenshots taken of live KMC workflow with real data (opportunity 874, 1266 children, 5201 visits). Issues identified across dashboard, child list, and timeline views. + +## Issues + +### Dashboard + +#### 1. Second row KPI cards lack color coding +- "Below Avg Gain", "Reached 2.5kg", "Discharged" all have gray borders +- Should have distinct colors: yellow/warning for Below Avg, green for Reached 2.5kg, blue for Discharged +- **Effort:** Small (CSS only) + +#### 2. 
No percentage context on KPI cards +- "362 Overdue" is more meaningful with "29% of total" underneath +- Add small percentage or fraction to help managers gauge severity +- **Effort:** Small + +#### 3. Missing "Avg Visits/Child" card +- Design doc spec'd this card. The value is computed (shown in summary text as "4.1 visits/child avg") but not shown as a KPI card +- **Effort:** Small + +### Child List + +#### 4. FLW column shows raw usernames +- "gitgljfaw80gx37q0sph" is meaningless to users +- If display names aren't available from the pipeline, consider truncating or omitting +- **Effort:** Small — may need pipeline schema change to extract display name + +#### 5. Weight gain sign bug — "+-200g" +- Negative weight gains display as "+-200g (+-17%)" instead of "-200g (-17%)" +- The `+` prefix is being prepended regardless of sign +- Seen on child "rahan" (8 visits, current weight 1000g, birth weight 1200g) +- **Effort:** Small (string formatting fix in render code) + +#### 6. Last Visit column — no positive color +- Red for overdue (>14 days) is good +- Consider green for <7 days to show children on track +- **Effort:** Small + +### Child Timeline + +#### 7. Clinical detail panel is mostly empty +- Visit 8 for "waswa" (9 visits) shows Weight: "—", Height: "—", KMC Hours: "—" etc. +- The chart shows data points at those dates, so weight data exists in the pipeline +- **Root cause hypothesis:** The detail panel reads from specific field names that don't match what the pipeline actually extracts. Many pipeline fields may have wrong `path` values that don't match the actual CommCare form structure. +- **Effort:** Medium — need to inspect actual pipeline row data vs. what the detail panel expects + +#### 8. 
No selected-visit highlight on chart +- When clicking a visit in the sidebar, the corresponding chart point should be highlighted (larger dot, different color) +- Currently no visual connection between sidebar selection and chart +- **Effort:** Medium (Chart.js point styling) + +#### 9. Map marker legend missing +- Red circles with cross vs blue/green dots — unclear what they represent +- Need a small legend explaining marker colors +- **Effort:** Small + +#### 10. Visit sidebar "Visit 8" labeling +- Shows "Visit 8" at top (selected, most recent) but this is confusing — is it the 8th visit chronologically? The most recent? +- Consider showing visit date more prominently or clarifying numbering +- **Effort:** Small + +#### 11. "Visit Additional" label in sidebar +- Seen on child "rahan" — sidebar shows "Visit Additional" as form name instead of a visit number +- Should normalize non-standard form names to "Visit N" or display form name + visit number +- **Effort:** Small + +#### 12. Flat/negative weight not visually flagged +- "rahan" has weight stuck at ~1,000g across 8 visits over 6 weeks — a red flag for KMC +- Consider visual alert (red border, warning icon) when weight is stagnant below threshold +- **Effort:** Medium + +### General / Cross-cutting + +#### 13. Header fields mostly empty (DOB, Mother, Village, Subcounty) +- All children viewed show "-" for DOB, Mother, Village, Subcounty +- **Root cause hypothesis:** Same as #7 — pipeline schema `path` values likely don't match actual CommCare form field paths. The data exists in CommCare but isn't being extracted. +- This is probably the biggest systemic issue. Need to: + 1. Inspect a raw form submission to see actual field paths + 2. Compare against PIPELINE_SCHEMAS field definitions + 3. Update paths to match reality +- **Effort:** Medium — requires inspecting raw CommCare data + +#### 14. 
No export/download +- Program managers need to export child lists to Excel/CSV for reporting +- **Effort:** Medium + +#### 15. No loading state for child list +- With 1266 children, should show spinner or skeleton while list renders +- **Effort:** Small + +## Priority Order + +| Priority | Issue | Why | +|----------|-------|-----| +| P0 | #5 Weight gain sign bug | Data accuracy — shows wrong numbers | +| P0 | #7, #13 Pipeline field path mismatch | Core feature gap — most fields empty | +| P1 | #1 KPI card colors | Quick visual win | +| P1 | #2 KPI percentages | Quick context win | +| P1 | #3 Avg visits card | Missing spec'd feature | +| P1 | #8 Chart visit highlight | Core interaction missing | +| P2 | #4 FLW usernames | Display quality | +| P2 | #6 Last visit green color | Visual polish | +| P2 | #9 Map legend | Usability | +| P2 | #10, #11 Visit labeling | Clarity | +| P2 | #12 Stagnant weight alert | Clinical value | +| P3 | #14 Export | Feature request | +| P3 | #15 Loading state | Polish | + +## Notes + +- Pagination was considered but isn't causing a performance issue yet with 1266 rows — deferred +- The pipeline field path mismatch (#7, #13) is likely the root cause of most "empty data" issues and should be investigated first by inspecting raw CommCare form data against the PIPELINE_SCHEMAS paths diff --git a/docs/plans/2026-03-08-kmc-flw-flags-v2.md b/docs/plans/2026-03-08-kmc-flw-flags-v2.md new file mode 100644 index 000000000..382c9461f --- /dev/null +++ b/docs/plans/2026-03-08-kmc-flw-flags-v2.md @@ -0,0 +1,470 @@ +# KMC FLW Flag Report V2 — Bug Fixes, Logic Alignment, and UI Improvements + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Fix critical bugs, align flag logic with Neal's methodology, improve column headers/values readability, and add an audit configuration modal. 
+ +**Architecture:** All changes are in `kmc_flw_flags.py` (PIPELINE_SCHEMAS + RENDER_CODE) with one small fix in audit `data_access.py`. Weight pair logic, enrollment computation, and formatting all happen client-side in RENDER_CODE. The weight_series pipeline gains two fields (reg_date, discharge_date) for per-case enrollment timing. + +**Tech Stack:** Python (pipeline schema), JSX/React (RENDER_CODE), Tailwind CSS + +--- + +## Task 1: Fix weight gain calculation (×1000 bug) + +**Files:** +- Modify: `commcare_connect/workflow/templates/kmc_flw_flags.py` — RENDER_CODE `computeWeightMetrics` function + +**Context:** Weight is already in grams from CommCare (`child_weight_visit` type=Int, label "Weight of SVN (grams)"). The code multiplies by 1000 treating grams as kg, producing absurd values like 16239 g/day instead of ~16 g/day. + +**Step 1: Fix the calculation** + +In `computeWeightMetrics`, find line: +```javascript +totalDailyGain += (diff * 1000) / days; // convert kg to g +``` + +Replace with: +```javascript +totalDailyGain += diff / days; // weight already in grams +``` + +**Step 2: Verify mentally** + +Example: weight goes from 1500g to 1650g over 10 days = 150/10 = 15 g/day. Correct. + +**Step 3: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py +git commit -m "fix: remove erroneous ×1000 in weight gain calculation (weight already in grams)" +``` + +--- + +## Task 2: Fix audit creation — selected FLWs not filtered + +**Files:** +- Modify: `commcare_connect/workflow/templates/kmc_flw_flags.py` — RENDER_CODE `handleCreateAudits` +- Modify: `commcare_connect/audit/data_access.py:91-119` — `AuditCriteria.from_dict` (defensive fallback) + +**Context:** Template sends `selected_usernames` but `AuditCriteria.from_dict()` reads `selected_flw_user_ids`. Fix both sides. 
+ +**Step 1: Fix the template key name** + +In `handleCreateAudits`, find: +```javascript +selected_usernames: selectedUsernames +``` + +Replace with: +```javascript +selected_flw_user_ids: selectedUsernames +``` + +**Step 2: Add defensive fallback in AuditCriteria.from_dict** + +In `data_access.py`, find: +```python +selected_flw_user_ids=data.get("selected_flw_user_ids", []), +``` + +Replace with: +```python +selected_flw_user_ids=data.get("selected_flw_user_ids") or data.get("selected_usernames", []), +``` + +**Step 3: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py commcare_connect/audit/data_access.py +git commit -m "fix: pass selected_flw_user_ids so audits only cover selected FLWs" +``` + +--- + +## Task 3: Fix flag logic to match Neal's methodology + +**Files:** +- Modify: `commcare_connect/workflow/templates/kmc_flw_flags.py` — RENDER_CODE data processing section + +**Context:** Multiple formula corrections needed per Neal's doc. + +**Step 1: Fix flag_visits denominator (closed non-mortality cases)** + +Find: +```javascript +var avgVisits = closedCases > 0 ? totalVisits / closedCases : null; +``` + +Replace with: +```javascript +var nonMortClosed = closedCases - deaths; +var avgVisits = nonMortClosed > 0 ? totalVisits / nonMortClosed : null; +``` + +**Step 2: Fix mortality rate denominator (total cases, not closed)** + +Find: +```javascript +var mortRate = closedCases > 0 ? deaths / closedCases : null; +``` + +Replace with: +```javascript +var mortRate = totalCases > 0 ? deaths / totalCases : null; +``` + +**Step 3: Add weight pair validation criteria (500g-5000g, 1-30 days apart)** + +In `computeWeightMetrics`, after parsing weights, add validation. 
Find: +```javascript +var w1 = parseFloat(prev.weight); +var w2 = parseFloat(curr.weight); +if (isNaN(w1) || isNaN(w2) || w1 <= 0) continue; +``` + +Replace with: +```javascript +var w1 = parseFloat(prev.weight); +var w2 = parseFloat(curr.weight); +if (isNaN(w1) || isNaN(w2)) continue; +// Neal: weights must be 500g-5000g +if (w1 < 500 || w1 > 5000 || w2 < 500 || w2 > 5000) continue; +// Neal: visits must be 1-30 days apart +var d1 = new Date(prev.visit_date); +var d2 = new Date(curr.visit_date); +var daysBetween = (d2 - d1) / (1000 * 60 * 60 * 24); +if (daysBetween < 1 || daysBetween > 30) continue; +``` + +Then update the daily gain calculation below to reuse `daysBetween` instead of recomputing `days`: +```javascript +if (daysBetween > 0) { + totalDailyGain += diff / daysBetween; // weight already in grams + gainPairCount++; +} +``` + +And remove the duplicate date computation that was there before. + +**Step 4: Fix flag_wt_gain threshold direction** + +Neal says "mean daily weight gain exceeds 60 g/day" — flag for HIGH gain, not low. Currently our code has: +```javascript +flags.low_wt_gain = wm.weight_pairs >= MIN_CASES.weight && wm.mean_daily_gain !== null && wm.mean_daily_gain < THRESHOLDS.wt_gain; +``` + +But the threshold is 60 and the flag name in Neal's doc is `flag_wt_gain` (high). Fix: +```javascript +flags.high_wt_gain = wm.weight_pairs >= MIN_CASES.weight && wm.mean_daily_gain !== null && wm.mean_daily_gain > THRESHOLDS.wt_gain; +``` + +Also update the flag reference in the table rendering from `low_wt_gain` to `high_wt_gain`. + +**Step 5: Fix minimum case requirements for mortality flags** + +Neal says minimum is "20 closed cases" for mortality. Currently we check `closedCases >= MIN_CASES.mort`. But the MIN_CASES.mort is already 20, so this is fine. 
However, for flag_mort_low we should also check `totalCases >= MIN_CASES.mort`: +```javascript +flags.high_mort = totalCases >= MIN_CASES.mort && mortRate !== null && mortRate > THRESHOLDS.mort_high; +flags.low_mort = totalCases >= MIN_CASES.mort && mortRate !== null && mortRate < THRESHOLDS.mort_low; +``` + +**Step 6: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py +git commit -m "fix: align flag computations with Neal's methodology + +- flag_visits: divide by closed non-mortality cases +- mortality: use total cases as denominator +- weight pairs: filter 500-5000g range, 1-30 day window +- flag_wt_gain: flag HIGH gain (>60g/day), not low +- mortality minimums: check totalCases not closedCases" +``` + +--- + +## Task 4: Add enrollment timing to weight_series pipeline + client-side computation + +**Files:** +- Modify: `commcare_connect/workflow/templates/kmc_flw_flags.py` — PIPELINE_SCHEMAS and RENDER_CODE + +**Context:** Neal's flag_enroll needs ">35% of cases enrolled 8+ days post-discharge". This requires per-case data. We add reg_date and discharge_date to the weight_series pipeline, then compute per-case enrollment lateness client-side. + +**Step 1: Add fields to weight_series pipeline** + +In PIPELINE_SCHEMAS, add two fields to the `weight_series` schema `fields` array: +```python +{ + "name": "reg_date", + "paths": ["form.reg_date", "form.grp_kmc_beneficiary.reg_date"], + "aggregation": "first", + "transform": "date", +}, +{ + "name": "discharge_date", + "path": "form.hosp_lbl.date_hospital_discharge", + "aggregation": "first", + "transform": "date", +}, +``` + +**Step 2: Replace enrollment computation in RENDER_CODE** + +Remove the old single-value enrollment logic (the `enrollLate` boolean computed from first reg_date/discharge_date in processedData). 
+ +Replace with a function that computes per-case enrollment lateness from weight_series data: + +```javascript +var computeEnrollmentMetrics = function(username, weightRows) { + var myRows = (weightRows || []).filter(function(r) { return r.username === username; }); + if (myRows.length === 0) return { pctLateEnroll: null, casesWithDates: 0 }; + + // Group by case, take first reg_date and discharge_date per case + var byCase = {}; + myRows.forEach(function(r) { + var cid = r.beneficiary_case_id; + if (!cid) return; + if (!byCase[cid]) byCase[cid] = { reg_date: null, discharge_date: null }; + if (r.reg_date && !byCase[cid].reg_date) byCase[cid].reg_date = r.reg_date; + if (r.discharge_date && !byCase[cid].discharge_date) byCase[cid].discharge_date = r.discharge_date; + }); + + var casesWithDates = 0; + var lateCases = 0; + Object.keys(byCase).forEach(function(cid) { + var c = byCase[cid]; + if (c.reg_date && c.discharge_date) { + casesWithDates++; + var rd = new Date(c.reg_date); + var dd = new Date(c.discharge_date); + var daysDiff = (rd - dd) / (1000 * 60 * 60 * 24); + if (daysDiff > 8) lateCases++; + } + }); + + return { + pctLateEnroll: casesWithDates >= 10 ? 
lateCases / casesWithDates : null, + casesWithDates: casesWithDates + }; +}; +``` + +**Step 3: Update processedData to use new enrollment metrics** + +Replace the `enrollLate` boolean logic with: +```javascript +var em = computeEnrollmentMetrics(u, weightRows); +``` + +And in the return object: +```javascript +pctLateEnroll: em.pctLateEnroll, +casesWithDates: em.casesWithDates, +``` + +**Step 4: Update flag computation** + +Replace: +```javascript +flags.late_enroll = closedCases >= MIN_CASES.enroll && enrollLate === true; +``` + +With: +```javascript +flags.late_enroll = em.casesWithDates >= MIN_CASES.enroll && em.pctLateEnroll !== null && em.pctLateEnroll > THRESHOLDS.enroll; +``` + +**Step 5: Remove old enrollment fields from flw_flags pipeline** + +Remove `discharge_date` and `reg_date` from the `flw_flags` pipeline since enrollment is now computed from weight_series. (These fields only made sense as aggregated "first" values, which was the wrong approach.) + +**Step 6: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py +git commit -m "fix: compute enrollment flag as per-case percentage from weight_series data + +Per Neal's methodology: >35% of cases enrolled 8+ days post-discharge. +Previously used a single boolean from first reg/discharge dates." +``` + +--- + +## Task 5: Improve column headers, values, and table UX + +**Files:** +- Modify: `commcare_connect/workflow/templates/kmc_flw_flags.py` — RENDER_CODE table section + +**Context:** Headers are cryptic ("AVG VIS", "MORT%", "8+ DAYS", "GAIN"). Values lack context. "NE" not explained. + +**Step 1: Replace column headers with descriptive text + tooltips** + +Replace the column header `` elements with descriptive names. Add a `title` attribute for tooltips with Neal's definitions. 
New headers: + +| Old | New Header | Tooltip (abbreviated) | +|-----|-----------|----------------------| +| Cases | Cases | Total distinct beneficiary cases | +| Avg Vis | Visits/Case | Avg visits per closed non-mortality case. Flag: <3.0 | +| Mort% | Mortality | Deaths as % of total cases. Flag: <2% or >20% | +| 8+ Days | Late Enroll | % of cases enrolled 8+ days post-discharge. Flag: >35% | +| Danger | Danger Signs | % of visits with danger sign positive. Flag: >30% or 0% | +| Wt Loss | Wt Loss | % of visit pairs showing weight decrease. Flag: >15% | +| Gain | Gain (g/d) | Mean daily weight gain in grams. Flag: >60 g/day | +| Wt Zero | Wt Zero | % of visit pairs with exactly zero weight change. Flag: >30% | + +**Step 2: Fix value formatting** + +- Enrollment: show percentage (e.g., "42.1%") instead of Yes/No +- Gain: show with units, e.g., "15.2 g/d" +- Add "NE" with title tooltip explaining "Not Eligible — insufficient data" + +Update the `fmt` function: +```javascript +var fmt = function(val, type) { + if (val === null || val === undefined) return null; // return null, handle NE in rendering + if (type === 'pct') return (val * 100).toFixed(1) + '%'; + if (type === 'dec') return val.toFixed(1); + if (type === 'gain') return val.toFixed(1) + ' g/d'; + return String(val); +}; +``` + +For table cells, render NE with a tooltip: +```javascript +var renderCell = function(val, type, flagKey, flags) { + var formatted = fmt(val, type); + var flagged = flagKey && flags[flagKey]; + var cellClass = 'px-3 py-3 text-sm text-center ' + (flagged ? 
'bg-red-50 text-red-800 font-semibold' : ''); + if (formatted === null) { + return React.createElement('td', { className: cellClass, title: 'Not Eligible — insufficient data for this metric' }, + React.createElement('span', { className: 'text-gray-400 italic' }, 'NE') + ); + } + return React.createElement('td', { className: cellClass }, formatted); +}; +``` + +**Step 3: Add flag breakdown on hover/expand for flag count column** + +Show which specific flags are triggered when hovering the flag count badge: +```javascript +var flagLabels = { + low_visits: 'Low Visits', + high_mort: 'High Mortality', + low_mort: 'Low Mortality', + late_enroll: 'Late Enrollment', + high_danger: 'High Danger Signs', + zero_danger: 'Zero Danger Signs', + high_wt_loss: 'Weight Loss', + high_wt_gain: 'High Weight Gain', + high_wt_zero: 'Zero Weight Change' +}; +``` + +In the flag count cell, add a title attribute listing triggered flags: +```javascript +var activeFlags = Object.keys(d.flags).filter(function(k) { return d.flags[k]; }); +var flagTitle = activeFlags.map(function(k) { return flagLabels[k] || k; }).join(', '); +``` + +**Step 4: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py +git commit -m "improve: descriptive column headers, value formatting, NE tooltips, flag breakdown" +``` + +--- + +## Task 6: Add audit configuration modal + +**Files:** +- Modify: `commcare_connect/workflow/templates/kmc_flw_flags.py` — RENDER_CODE + +**Context:** Currently clicking "Create Audits" immediately fires with hardcoded last-week dates. Need a modal that lets the user configure: date range, visits per FLW, AI agent, and title before creating. 
+ +**Step 1: Add modal state** + +```javascript +var _modal = React.useState(false); +var showModal = _modal[0]; var setShowModal = _modal[1]; +var _auditConfig = React.useState({ + datePreset: 'last_week', + startDate: '', + endDate: '', + countPerFlw: 10, + aiAgent: 'scale_validation', + title: '' +}); +var auditConfig = _auditConfig[0]; var setAuditConfig = _auditConfig[1]; +``` + +**Step 2: Initialize date defaults** + +Add a useEffect that computes last week dates and sets them in auditConfig, matching the existing pattern from audit_with_ai_review.py. + +**Step 3: Replace the direct "Create Audits" button with modal opener** + +Change the sticky action bar button from calling `handleCreateAudits` directly to `setShowModal(true)`. + +**Step 4: Build the modal component** + +Modal with: +- **Title** text input (auto-generated default: "FLW Flag Audit {startDate} to {endDate}") +- **Date preset** buttons: "Last Week", "Last 2 Weeks", "Last Month", "Custom" +- **Custom date range** inputs (shown when preset is "custom") +- **Visits per FLW** number input (default 10) +- **AI Agent** selector (default "scale_validation", option for "none") +- **Selected FLW count** summary (read-only) +- **Create** and **Cancel** buttons + +Modal renders as a fixed overlay with backdrop: +```javascript +{showModal && React.createElement('div', { className: 'fixed inset-0 z-50 flex items-center justify-center bg-black bg-opacity-50' }, + React.createElement('div', { className: 'bg-white rounded-xl shadow-2xl w-full max-w-lg mx-4 p-6' }, + // Modal content... + ) +)} +``` + +**Step 5: Update handleCreateAudits to use modal config** + +Replace hardcoded date computation with values from `auditConfig` state. Close modal on submit. 
+ +**Step 6: Commit** + +```bash +git add commcare_connect/workflow/templates/kmc_flw_flags.py +git commit -m "feat: add audit configuration modal with date range, visits per FLW, and AI agent options" +``` + +--- + +## Task 7: Manual verification + +**Step 1: Start the server and test** + +```bash +inv up && python manage.py runserver +``` + +Navigate to the KMC FLW Flag Report workflow for opportunity 874. Verify: + +1. **Weight gain** column shows reasonable values (10-30 g/day range, not 16000+) +2. **Column headers** are descriptive with tooltips +3. **NE** values have italic styling and tooltip +4. **Enrollment** column shows percentages, not Yes/No +5. **Flag count** hover shows which flags are triggered +6. **Select 2 FLWs** → click Create Audits → modal appears +7. **Configure dates** and click Create → only selected FLWs get audits +8. **Mortality rate** uses total cases denominator + +**Step 2: Run E2E test** + +```bash +pytest commcare_connect/workflow/tests/e2e/test_flw_flags_workflow.py -v --ds=config.settings.local -o "addopts=" --opportunity-id=874 +``` + +**Step 3: Final commit if any adjustments needed** diff --git a/docs/plans/2026-03-09-workflow-docs-dry-design.md b/docs/plans/2026-03-09-workflow-docs-dry-design.md new file mode 100644 index 000000000..87457cc8f --- /dev/null +++ b/docs/plans/2026-03-09-workflow-docs-dry-design.md @@ -0,0 +1,56 @@ +# Workflow Engine Documentation & Tooling DRY Refactor + +**Date:** 2026-03-09 +**Status:** Approved + +## Problem + +Workflow authoring knowledge is fragmented across 5+ locations (skill, agent instructions, README, base.py, individual templates). Building workflows from external documents (indicator specs, monitoring frameworks) requires undocumented knowledge about MCP field discovery. The in-product AI agent and Claude Code maintain duplicate reference material that drifts. 
+ +## Architecture + +Single source of truth: `commcare_connect/workflow/WORKFLOW_REFERENCE.md` + +``` +WORKFLOW_REFERENCE.md <- canonical reference (all detail lives here) + ^ read by +workflow_agent.py <- in-product agent loads at module init + ^ linked from +.claude/skills/workflow-templates/SKILL.md <- process guide + link to reference + ^ linked from +CLAUDE.md <- brief summary + link +``` + +## Deliverables + +### 1. `commcare_connect/workflow/WORKFLOW_REFERENCE.md` + +Comprehensive authoring guide with these sections: + +1. **Template Anatomy** — DEFINITION, PIPELINE_SCHEMAS, RENDER_CODE, TEMPLATE export +2. **Pipeline Schema Deep-Dive** — fields, paths, aggregations, transforms, terminal_stage, linking_field, multi-path fallback +3. **Discovering Field Paths** — using MCP `get_form_json_paths` or manual CommCare inspection +4. **Render Code Contract** — props interface, constraints (`var` not `const/let`, no imports), CDN libs +5. **Actions API** — complete reference with signatures +6. **Common UI Patterns** — KPI cards, tables, charts, maps, status badges, SSE streaming +7. **Building from External Specs** — indicator document -> pipeline fields -> render code + +### 2. Improved `workflow-templates` Skill + +Restructure as two-phase process: +- **Phase 1 (conditional):** When building from external spec — analyze document, use MCP tools to discover field paths, map indicators to pipeline fields +- **Phase 2:** Build the template — references WORKFLOW_REFERENCE.md instead of duplicating details + +### 3. CLAUDE.md Addition + +Add `## Workflow Engine` section (~8 lines) with mental model and link to reference. + +### 4. Agent Refactor + +`workflow_agent.py` loads WORKFLOW_REFERENCE.md at module init, replacing hardcoded duplicate material in WORKFLOW_AGENT_INSTRUCTIONS. + +## Design Decisions + +- **One skill, not two** — `workflow-from-spec` merged into `workflow-templates` as a conditional phase. 
Avoids maintaining duplicate schema/render/action documentation. +- **Reference file in workflow dir, not docs/** — it's a developer/AI reference, not user documentation. Lives next to the code it describes. +- **Agent loads file at import time** — simple, no runtime overhead, file changes picked up on server restart. Same pattern as templates auto-discovery. diff --git a/docs/plans/2026-03-09-workflow-docs-dry-plan.md b/docs/plans/2026-03-09-workflow-docs-dry-plan.md new file mode 100644 index 000000000..f4a510ab5 --- /dev/null +++ b/docs/plans/2026-03-09-workflow-docs-dry-plan.md @@ -0,0 +1,348 @@ +# Workflow Docs & Tooling DRY Refactor — Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Create a single source of truth (`WORKFLOW_REFERENCE.md`) for workflow authoring, then wire up CLAUDE.md, the skill, and the in-product AI agent to reference it instead of duplicating material. + +**Architecture:** One canonical reference file in the workflow directory. The skill becomes a process guide that links to it. The in-product agent loads it at module init. CLAUDE.md gets a brief section pointing to it. + +**Tech Stack:** Markdown (reference), Python (agent refactor), SKILL.md (skill rewrite) + +--- + +### Task 1: Create `WORKFLOW_REFERENCE.md` + +**Files:** +- Create: `commcare_connect/workflow/WORKFLOW_REFERENCE.md` + +**Step 1: Write the reference document** + +Create the canonical reference with these sections. Content is synthesized from existing sources (types.ts, data_access.py, skill, agent instructions, template examples): + +```markdown +# Workflow Engine Reference + +Complete reference for building workflow templates. This file is the single source +of truth — the Claude Code skill, in-product AI agent, and CLAUDE.md all reference it. + +## Template Anatomy + +Each template is a single Python file in `commcare_connect/workflow/templates/`. 
+Auto-discovered by the registry in `__init__.py`. + +### Required Exports + +DEFINITION dict — workflow configuration (name, statuses, config) +RENDER_CODE string — React JSX component +TEMPLATE dict — ties everything together with key, icon, color +Optional: PIPELINE_SCHEMA (single) or PIPELINE_SCHEMAS (multiple) + +### Minimal Example (performance_review.py) +[Include trimmed version of performance_review.py showing structure] + +### Multi-Pipeline Example (kmc_longitudinal.py) +[Show the PIPELINE_SCHEMAS pattern with aliases] + +## Pipeline Schema Deep-Dive + +### Schema Structure +[Full schema with all fields documented] + +### Fields +- name: field identifier (used in rows.computed.{name} or rows.custom_fields.{name}) +- path: dot-notated JSON path (e.g., "form.anthropometric.child_weight_visit") +- paths: array of fallback paths — tried in order, first non-null wins +- aggregation: first, last, count, sum, avg, min, max, list, count_unique +- transform: "float", "int", "kg_to_g", "date", "string" (or omit for raw string) +- filter_path / filter_value: only include rows where filter_path == filter_value +- description: human-readable label + +### Terminal Stage +- "visit_level": one row per form submission. Fields in row.computed.{name} +- "aggregated": one row per grouping_key. Custom fields in row.custom_fields.{name} + +### Grouping Key +- "username": group by FLW (most common) +- "entity_id": group by delivery unit +- "deliver_unit_id": group by delivery unit ID + +### Linking Field +For visit_level pipelines, linking_field specifies which field connects +visits to a logical entity (e.g., beneficiary_case_id links visits to a child). + +### Data Source +[Document data_source options] + +### Histograms +[Document histogram computation structure] + +## Discovering Field Paths + +### Using the MCP Server (Claude Code) +1. get_opportunity_apps(opportunity_id) → get domain and app IDs +2. get_app_structure(domain, app_id) → see modules, forms, xmlns +3. 
get_form_json_paths(xmlns, domain, app_id) → exact JSON paths for each question +4. Use json_path values directly in PIPELINE_SCHEMAS field definitions + +### Without MCP (manual / in-product agent) +- CommCare HQ → Application → Form → question ID maps to form.{group}.{question_id} +- Nested groups: form.{group1}.{group2}.{question_id} +- Case properties: form.case.@case_id, form.case.update.{property} +- Meta fields: form.meta.timeEnd, form.meta.instanceID, form.meta.location.#text + +### Common Meta Paths +[Table of always-available paths] + +## Render Code Contract + +### Function Signature +function WorkflowUI({ definition, instance, workers, pipelines, links, actions, onUpdateState }) + +### Constraints +- Must define a function named WorkflowUI (not const/let — use function declaration) +- Use var for all variable declarations (Babel standalone + eval limitation) +- No imports — only React global is available +- CDN libs available: Chart.js 4.4.0 (window.Chart), Leaflet 1.9.4 (window.L) +- Tailwind CSS classes available for styling + +### Props Reference +[Full props table derived from types.ts] + +### Pipeline Data Access +[visit_level vs aggregated patterns with code examples] + +## Actions API + +### Task Management +[createTask, openTaskCreator, getTaskDetail, updateTask signatures] + +### Audit Creation +[createAudit, getAuditStatus, streamAuditProgress, cancelAudit] + +### Job Management +[startJob, streamJobProgress, cancelJob, deleteRun] + +### OCS Integration +[checkOCSStatus, listOCSBots, createTaskWithOCS, initiateOCSSession] + +### MBW-Specific +[saveWorkerResult, completeRun] + +### AI Transcript +[getAITranscript, getAISessions, saveAITranscript] + +## Common UI Patterns + +### KPI Summary Cards +[Code snippet] + +### Sortable/Filterable Table +[Code snippet] + +### Status Badges +[Code snippet with color map] + +### Chart.js Integration +[Code snippet showing window.Chart usage] + +### Leaflet Map +[Code snippet showing window.L usage] + +### 
SSE Pipeline Loading +[Code snippet for EventSource pattern] + +### Progress Tracking (Jobs/Audits) +[Code snippet] + +## Building from External Specs + +### Process +1. Analyze the source document — identify indicators, data points, groupings +2. Map each indicator to a CommCare form question (use MCP or manual inspection) +3. Decide terminal_stage: do you need per-visit rows or per-worker aggregates? +4. Write PIPELINE_SCHEMAS with correct paths, aggregations, transforms +5. Design RENDER_CODE to visualize the indicators +6. Wire into TEMPLATE export + +### Indicator → Pipeline Field Mapping +[Examples of common indicator types and how to express them] + +### Validation Checklist +- Template key is unique (check __init__.py) +- All field paths verified via MCP or manual inspection +- Test with ?edit=true to verify pipeline data is non-empty +- Check browser console for Babel transpilation errors +``` + +**Step 2: Verify the document renders correctly** + +Run: `python -c "open('commcare_connect/workflow/WORKFLOW_REFERENCE.md').read()"` +Expected: No errors, file exists and is readable. + +**Step 3: Commit** + +```bash +git add commcare_connect/workflow/WORKFLOW_REFERENCE.md +git commit -m "docs: create WORKFLOW_REFERENCE.md as single source of truth for workflow authoring" +``` + +--- + +### Task 2: Update CLAUDE.md with Workflow Engine section + +**Files:** +- Modify: `CLAUDE.md` (insert after App Map section, before Key Commands) + +**Step 1: Add the Workflow Engine section** + +Insert between the App Map table and Key Commands: + +```markdown +## Workflow Engine + +Templates are single Python files in `workflow/templates/` exporting DEFINITION (statuses, config), RENDER_CODE (React JSX string transpiled by Babel), and optionally PIPELINE_SCHEMAS (CommCare form field extraction). The registry auto-discovers them. Pipeline schemas map CommCare form JSON paths to extracted fields with aggregations and transforms. 
Render code receives `{definition, instance, workers, pipelines, links, actions, onUpdateState}` as props. + +Use the MCP server's `get_form_json_paths` tool to discover correct field paths when building pipeline schemas. + +**Full reference:** [WORKFLOW_REFERENCE.md](commcare_connect/workflow/WORKFLOW_REFERENCE.md) +``` + +**Step 2: Verify CLAUDE.md is valid** + +Visually check the file reads correctly. + +**Step 3: Commit** + +```bash +git add CLAUDE.md +git commit -m "docs: add Workflow Engine section to CLAUDE.md linking to reference" +``` + +--- + +### Task 3: Rewrite the `workflow-templates` skill + +**Files:** +- Modify: `.claude/skills/workflow-templates/SKILL.md` + +**Step 1: Rewrite the skill as a process guide** + +The skill should focus on the *process* of building a template, NOT duplicate reference material. It should link to WORKFLOW_REFERENCE.md for details. + +New structure: +1. Phase 1 (conditional): Interpreting external specs — analyze document, use MCP tools +2. Phase 2: Build the template — with links to reference for schema/render/action details +3. Validation checklist + +Key change: Remove all the inline props tables, action examples, and pattern code. Replace with "See WORKFLOW_REFERENCE.md sections X and Y." 
+ +**Step 2: Verify skill file** + +Run: `python -c "open('.claude/skills/workflow-templates/SKILL.md').read()"` + +**Step 3: Commit** + +```bash +git add .claude/skills/workflow-templates/SKILL.md +git commit -m "refactor: rewrite workflow-templates skill as process guide referencing WORKFLOW_REFERENCE.md" +``` + +--- + +### Task 4: Refactor `workflow_agent.py` to load from reference + +**Files:** +- Modify: `commcare_connect/ai/agents/workflow_agent.py` + +**Step 1: Add reference file loading** + +At module level, load WORKFLOW_REFERENCE.md and include it in agent instructions: + +```python +from pathlib import Path + +_REFERENCE_PATH = Path(__file__).resolve().parents[2] / "workflow" / "WORKFLOW_REFERENCE.md" + +def _load_reference() -> str: + """Load the workflow reference document for agent context.""" + try: + return _REFERENCE_PATH.read_text(encoding="utf-8") + except FileNotFoundError: + logger.warning(f"Workflow reference not found at {_REFERENCE_PATH}") + return "" +``` + +**Step 2: Slim down WORKFLOW_AGENT_INSTRUCTIONS** + +Replace the hardcoded schema/props/actions documentation with a reference to the loaded file. Keep only the agent-specific instructions (context awareness, tool usage rules, critical rules): + +```python +WORKFLOW_AGENT_INSTRUCTIONS = f""" +You are an expert helping users build data-driven workflows with custom React UIs. + +## Context Awareness +[Keep existing context awareness section] + +## Reference +The full workflow authoring reference is below. Use it to understand template structure, +pipeline schemas, render code constraints, and available actions. + +{_load_reference()} + +## Tools Available +[Keep existing tools section] + +## When to Use Which Tool +[Keep existing decision tree] + +## CRITICAL RULES +[Keep existing rules] +""" +``` + +**Step 3: Run existing tests to ensure nothing breaks** + +Run: `pytest commcare_connect/ai/ -v --tb=short` +Expected: All tests pass (or no tests exist for this module — verify). 
+ +**Step 4: Commit** + +```bash +git add commcare_connect/ai/agents/workflow_agent.py +git commit -m "refactor: load WORKFLOW_REFERENCE.md in workflow agent instead of hardcoding docs" +``` + +--- + +### Task 5: Update workflow README to link to reference + +**Files:** +- Modify: `commcare_connect/workflow/README.md` + +**Step 1: Add reference link** + +Add a prominent link near the top of the README: + +```markdown +**Full authoring guide:** [WORKFLOW_REFERENCE.md](WORKFLOW_REFERENCE.md) — template anatomy, pipeline schemas, render code contract, actions API +``` + +**Step 2: Commit** + +```bash +git add commcare_connect/workflow/README.md +git commit -m "docs: link workflow README to WORKFLOW_REFERENCE.md" +``` + +--- + +### Task 6: Update auto-memory + +**Files:** +- Modify: `~/.claude/projects/.../memory/MEMORY.md` + +**Step 1: Add entry about the DRY refactor** + +Add a note about the single source of truth pattern so future sessions know about it. + +**Step 2: Done — no commit needed for memory files** diff --git a/e2e_console_log.txt b/e2e_console_log.txt new file mode 100644 index 000000000..aab85df55 --- /dev/null +++ b/e2e_console_log.txt @@ -0,0 +1,379 @@ +[warning] Alpine Warning: You can't use [x-collapse] without first installing the "Collapse" plugin here: https://alpinejs.dev/plugins/collapse JSHandle@node +[warning] Alpine Warning: You can't use [x-collapse] without first installing the "Collapse" plugin here: https://alpinejs.dev/plugins/collapse JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + 
+Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + 
+ JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Warning: You can't use [x-collapse] without first installing the "Collapse" plugin here: https://alpinejs.dev/plugins/collapse JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Warning: You can't use [x-collapse] without first installing the "Collapse" plugin here: https://alpinejs.dev/plugins/collapse 
JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[warning] Alpine Warning: You can't use [x-collapse] without first installing the "Collapse" plugin here: https://alpinejs.dev/plugins/collapse JSHandle@node +[warning] Alpine Warning: You can't use [x-collapse] without first installing the "Collapse" plugin here: https://alpinejs.dev/plugins/collapse JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) 
clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + 
this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Warning: You can't use [x-collapse] without first installing the "Collapse" plugin here: https://alpinejs.dev/plugins/collapse JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[warning] Alpine Warning: You can't use [x-collapse] without first installing the "Collapse" plugin here: https://alpinejs.dev/plugins/collapse JSHandle@node +[warning] Alpine Expression Error: this.$cleanup is not a function + +Expression: "init() { + // Auto-connect to SSE if job is running + if (this.jobStatus === 'running' && this.jobId) { + this.connectToJobStream(); + } + // Cleanup on component destroy + 
this.$cleanup(() => { + if (this.eventSource) this.eventSource.close(); + if (this.pollInterval) clearInterval(this.pollInterval); + }); + }" + + JSHandle@node +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[ERROR] this.$cleanup is not a function +[info] %cDownload the React DevTools for a better development experience: https://reactjs.org/link/react-devtools font-weight:bold +[log] Workflow data loaded: {definition: Object, definition_id: 1432, opportunity_id: 874, render_code: function WorkflowUI({ definition, instance, worker + )} + + ); +}, instance: Object} +[log] Workflow mounted successfully +[log] DynamicWorkflow: useMemo triggered, babelLoaded: false renderCode length: 49354 +[log] DynamicWorkflow: Babel not loaded yet +[log] DynamicWorkflow: useMemo triggered, babelLoaded: false renderCode length: 49354 +[log] DynamicWorkflow: Babel not loaded yet +[log] DynamicWorkflow: useMemo triggered, babelLoaded: true renderCode length: 49354 +[log] DynamicWorkflow: Attempting to transpile code, length: 49354 +[log] DynamicWorkflow: Transpiled successfully +[log] DynamicWorkflow: Component created successfully +[log] DynamicWorkflow: useMemo triggered, babelLoaded: true renderCode length: 49354 +[log] DynamicWorkflow: Attempting to transpile code, length: 49354 +[log] DynamicWorkflow: Transpiled successfully +[log] DynamicWorkflow: Component created successfully \ No newline at end of file diff --git a/e2e_flw_flags_ui.png b/e2e_flw_flags_ui.png new file mode 100644 index 000000000..f77fa6052 Binary files /dev/null and b/e2e_flw_flags_ui.png differ diff --git a/e2e_kmc_console.txt b/e2e_kmc_console.txt new 
file mode 100644 index 000000000..bb0bf4503 --- /dev/null +++ b/e2e_kmc_console.txt @@ -0,0 +1,51 @@ +URL: http://127.0.0.1:8001/labs/workflow/1432/run/?opportunity_id=874&run_id=1639 + +=== CONSOLE MESSAGES === + +=== PAGE TEXT === +Menu +Connect Labs +Opp: +KMC PIPN - New Opportunity (id: 874 visits: 5259) +JO +Workflows +Weekly Audit with AI Review +Weekly Audit with AI Review + +Create weekly audit sessions per FLW with optional AI image validation + +Selected Opportunity +Audit for Feb 23, 2026 - Mar 1, 2026 + +Date range audit - Weight validation + +AI Review Enabled +Audit Type +One audit per FLW +Related Fields +Image: +anthropometric/upload_weight_image +Reading: +child_weight_visit +Visit Selection +Date Range +Last N Visits +Last Week +Last 7 Days +Last 14 Days +Last 30 Days +This Month +Last Month +Custom +Start +End +Visit Filters +Only include visits with weight image +Excludes visits without an uploaded scale photo +Only include visits with weight reading +Excludes visits without a recorded weight value +AI Review Agent +None - Skip AI review +Scale Image Validation +Validates weight readings against scale images using ML vision +Create Weekly Audit with AI Review \ No newline at end of file diff --git a/e2e_kmc_debug.png b/e2e_kmc_debug.png new file mode 100644 index 000000000..bfe5320d7 Binary files /dev/null and b/e2e_kmc_debug.png differ diff --git a/e2e_step6_after_click.png b/e2e_step6_after_click.png new file mode 100644 index 000000000..986db704a Binary files /dev/null and b/e2e_step6_after_click.png differ diff --git a/e2e_step6_debug.png b/e2e_step6_debug.png new file mode 100644 index 000000000..04ff3c813 Binary files /dev/null and b/e2e_step6_debug.png differ diff --git a/e2e_step6_progress.png b/e2e_step6_progress.png new file mode 100644 index 000000000..fd55f0ba9 Binary files /dev/null and b/e2e_step6_progress.png differ diff --git a/kmc_fix_childlist.png b/kmc_fix_childlist.png new file mode 100644 index 000000000..23ad2da68 Binary 
files /dev/null and b/kmc_fix_childlist.png differ diff --git a/kmc_fix_dashboard.png b/kmc_fix_dashboard.png new file mode 100644 index 000000000..20c9b031d Binary files /dev/null and b/kmc_fix_dashboard.png differ diff --git a/kmc_fix_timeline1.png b/kmc_fix_timeline1.png new file mode 100644 index 000000000..b37b76471 Binary files /dev/null and b/kmc_fix_timeline1.png differ diff --git a/kmc_fix_timeline2.png b/kmc_fix_timeline2.png new file mode 100644 index 000000000..4d3a4db3e Binary files /dev/null and b/kmc_fix_timeline2.png differ diff --git a/scout_test.png b/scout_test.png new file mode 100644 index 000000000..a131be6f4 Binary files /dev/null and b/scout_test.png differ diff --git a/scout_test2.png b/scout_test2.png new file mode 100644 index 000000000..3bc716f50 Binary files /dev/null and b/scout_test2.png differ diff --git a/scout_test3.png b/scout_test3.png new file mode 100644 index 000000000..5abc25bfa Binary files /dev/null and b/scout_test3.png differ diff --git a/screenshot_timeline.py b/screenshot_timeline.py new file mode 100644 index 000000000..3f5687142 --- /dev/null +++ b/screenshot_timeline.py @@ -0,0 +1,127 @@ +"""Screenshot KMC timeline views to verify fixed pipeline field paths. + +Creates a new workflow run and waits for the pipeline to complete. +Requires the dev server running on port 8001 with Celery worker. 
+""" +import json +import sys +import subprocess +import os +import time +from pathlib import Path + +from playwright.sync_api import sync_playwright + +BASE = "http://127.0.0.1:8001" +OPP_ID = 874 +TOKEN_FILE = Path.home() / ".commcare-connect" / "token.json" +OUT_DIR = Path(".") + + +def main(): + token = json.loads(TOKEN_FILE.read_text())["access_token"] + + with sync_playwright() as p: + browser = p.chromium.launch(headless=True) + ctx = browser.new_context(viewport={"width": 1400, "height": 900}) + page = ctx.new_page() + page.set_default_timeout(120_000) + + # Console logging + page.on("console", lambda msg: print(f" [CONSOLE] {msg.type}: {msg.text}") if "error" in msg.type.lower() else None) + + # Auth + page.goto(f"{BASE}/labs/test-auth/?token={token}") + page.wait_for_load_state("networkidle") + print("Authenticated") + + # Go to workflow list + page.goto(f"{BASE}/labs/workflow/?opportunity_id={OPP_ID}") + page.wait_for_load_state("domcontentloaded") + + # Find KMC workflow and create run + kmc_cards = page.locator('[data-workflow-template="kmc_longitudinal"]') + kmc_cards.last.wait_for(timeout=10_000) + print(f"Found {kmc_cards.count()} KMC workflow(s)") + + kmc_card = kmc_cards.last + kmc_card.get_by_text("Create Run").click() + page.wait_for_load_state("domcontentloaded") + + # Wait for pipeline — poll for Child List tab (up to 5 min) + wf_root = page.locator("#workflow-root") + wf_root.wait_for(timeout=30_000) + print("Waiting for pipeline to process (up to 5 min)...") + + child_list_btn = wf_root.get_by_role("button", name="Child List") + child_list_btn.wait_for(timeout=300_000) + print("Pipeline complete!") + + # Screenshot dashboard + page.wait_for_timeout(2000) + page.screenshot(path=str(OUT_DIR / "kmc_fix_dashboard.png"), full_page=True) + print("Saved: kmc_fix_dashboard.png") + + # Click "Child List" tab + child_list_btn.click() + page.wait_for_timeout(2000) + + # Sort by visits descending + visits_header = 
page.locator("th").filter(has_text="Visits") + visits_header.click() + page.wait_for_timeout(500) + visits_header.click() + page.wait_for_timeout(1000) + + page.screenshot(path=str(OUT_DIR / "kmc_fix_childlist.png"), full_page=False) + print("Saved: kmc_fix_childlist.png") + + # Click first child (most visits) + first_row = page.locator("table tbody tr").first + first_row.click() + page.wait_for_timeout(3000) + + page.screenshot(path=str(OUT_DIR / "kmc_fix_timeline1.png"), full_page=True) + print("Saved: kmc_fix_timeline1.png") + + # Scroll to see clinical panel + page.evaluate("window.scrollTo(0, 600)") + page.wait_for_timeout(1000) + page.screenshot(path=str(OUT_DIR / "kmc_fix_timeline2.png"), full_page=False) + print("Saved: kmc_fix_timeline2.png") + + # Back to child list, click second child + wf_root.get_by_role("button", name="Child List").click() + page.wait_for_timeout(1000) + visits_header = page.locator("th").filter(has_text="Visits") + visits_header.click() + page.wait_for_timeout(500) + visits_header.click() + page.wait_for_timeout(500) + + second_row = page.locator("table tbody tr").nth(1) + second_row.click() + page.wait_for_timeout(3000) + + page.screenshot(path=str(OUT_DIR / "kmc_fix_timeline3.png"), full_page=True) + print("Saved: kmc_fix_timeline3.png") + + # Cleanup: delete the run we created + import re + run_id_match = re.search(r"/run/(\d+)/", page.url) + if run_id_match: + run_id = run_id_match.group(1) + csrf = page.evaluate("document.querySelector('#workflow-root')?.dataset?.csrfToken || ''") + if csrf: + page.request.post( + f"{BASE}/labs/workflow/api/run/{run_id}/delete/?opportunity_id={OPP_ID}", + headers={"X-CSRFToken": csrf}, + ) + print(f"Cleaned up run {run_id}") + + browser.close() + print("Done!") + + +if __name__ == "__main__": + main()