Skip to content

Commit ca42079

Browse files
committed
feat: update xAPI event to send the object in the right way with location, and send usage
1 parent 35df7a4 commit ca42079

15 files changed

+206
-146
lines changed

backend/openedx_ai_extensions/processors/llm/educator_assistant_processor.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ def _call_completion_api(self, system_role):
5858

5959
return {
6060
"response": content,
61-
"tokens_used": response.usage.total_tokens if response.usage else 0,
61+
"usage": response.usage if response.usage else None,
6262
"model_used": self.extra_params.get("model", "unknown"),
6363
"status": "success",
6464
}
@@ -93,12 +93,11 @@ def generate_quiz_questions(self, input_data):
9393
logger.exception(f"Error calling LiteLLM: {e}")
9494
return {"error": f"AI processing failed: {str(e)}"}
9595

96-
tokens_used = result.get("tokens_used", 0)
9796
response = json.loads(result['response'])
9897

9998
return {
10099
"response": response,
101-
"tokens_used": tokens_used,
100+
"usage": result.get("usage", None),
102101
"model_used": self.extra_params.get("model", "unknown"),
103102
"status": "success",
104103
}
@@ -121,19 +120,17 @@ def refine_quiz_question(self, input_data):
121120
for key, value in input_data.items():
122121
placeholder = f"{{{{{key.upper()}}}}}"
123122
prompt = prompt.replace(placeholder, str(value))
124-
logger.info(f"Refinement prompt after placeholder replacement: {prompt}")
125123

126124
try:
127125
result = self._call_completion_api(prompt)
128126
except Exception as e: # pylint: disable=broad-exception-caught
129127
logger.exception(f"Error calling LiteLLM during refinement: {e}")
130128
return {"error": f"AI processing failed: {str(e)}"}
131129

132-
tokens_used = result.get("tokens_used", 0)
133130
response = json.loads(result['response'])
134131
return {
135132
"response": response,
136-
"tokens_used": tokens_used,
133+
"usage": result.get("usage", None),
137134
"model_used": self.extra_params.get("model", "unknown"),
138135
"status": "success",
139136
}

backend/openedx_ai_extensions/processors/llm/llm_processor.py

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -83,12 +83,10 @@ def _handle_streaming_completion(self, response):
8383
def _handle_non_streaming_completion(self, response):
8484
"""Handles the non-streaming logic, returning a response dict."""
8585
content = response.choices[0].message.content
86-
total_tokens = response.usage.total_tokens if response.usage else 0
87-
logger.info(f"[LLM NON-STREAM] Tokens used: {total_tokens}")
8886

8987
return {
9088
"response": content,
91-
"tokens_used": total_tokens,
89+
"usage": response.usage if response.usage else None,
9290
"model_used": self.provider,
9391
"status": "success",
9492
}
@@ -181,12 +179,10 @@ def _call_responses_wrapper(self, params, initialize=False):
181179
if response_id:
182180
self.user_session.remote_response_id = response_id
183181
self.user_session.save()
184-
total_tokens = response.usage.total_tokens if response.usage else 0
185-
logger.info(f"[LLM NON-STREAM] Tokens used: {total_tokens}")
186182

187183
result = {
188184
"response": content,
189-
"tokens_used": total_tokens,
185+
"usage": response.usage if response.usage else None,
190186
"model_used": self.extra_params.get("model", "unknown"),
191187
"status": "success",
192188
}
@@ -682,12 +678,11 @@ def generate_flashcards(self):
682678
if "error" in result:
683679
return result
684680

685-
tokens_used = result.get("tokens_used", 0)
686681
response = json.loads(result['response'])
687682

688683
return {
689684
"response": response,
690-
"tokens_used": tokens_used,
685+
"usage": result.get("usage", None),
691686
"model_used": self.extra_params.get("model", "unknown"),
692687
"status": "success",
693688
}

backend/openedx_ai_extensions/workflows/orchestrators/base_orchestrator.py

Lines changed: 53 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,21 +19,68 @@ def __init__(self, workflow, user, context):
1919
self.location_id = context.get("location_id", None)
2020
self.course_id = context.get("course_id", None)
2121

22-
def _emit_workflow_event(self, event_name: str) -> None:
22+
def _convert_usage_to_json_serializable(self, usage) -> dict:
23+
"""
24+
Convert usage data to a JSON-serializable format.
25+
26+
This is necessary because usage data may contain complex objects
27+
(e.g. litellm Usage Pydantic models) that cannot be directly
28+
serialized to JSON when included in xAPI event data. This method
29+
first normalizes the input to a plain dict, then ensures every
30+
value is JSON-serializable.
31+
32+
Args:
33+
usage: A dictionary or Pydantic model containing usage data.
34+
35+
Returns:
36+
A new dictionary with all values converted to JSON-serializable formats.
37+
"""
38+
if isinstance(usage, dict):
39+
usage_dict = usage
40+
elif hasattr(usage, "model_dump"):
41+
# Pydantic v2 models (e.g. litellm Usage)
42+
usage_dict = usage.model_dump()
43+
elif hasattr(usage, "dict"):
44+
# Pydantic v1 models
45+
usage_dict = usage.dict()
46+
else:
47+
usage_dict = vars(usage)
48+
49+
serializable_usage = {}
50+
for key, value in usage_dict.items():
51+
if isinstance(value, (str, int, float, bool)) or value is None:
52+
serializable_usage[key] = value
53+
else:
54+
# For non-serializable types, convert to string representation
55+
serializable_usage[key] = str(value)
56+
return serializable_usage
57+
58+
def _emit_workflow_event(self, event_name: str, usage: dict = None) -> None:
    """
    Emit an xAPI event for this workflow.

    Args:
        event_name: The event name constant (e.g., EVENT_NAME_WORKFLOW_COMPLETED)
        usage: Optional LLM usage data (dict or Pydantic model). When
            provided, it is converted to a JSON-serializable dict and
            attached to the event payload under ``"usage"``.
    """
    event_data = {
        "workflow_id": str(self.workflow.id),
        "action": self.workflow.action,
        # Empty string (not "None") when the context did not supply an id.
        "course_id": str(self.course_id) if self.course_id else "",
        "profile_name": self.profile.slug,
        "location_id": str(self.location_id) if self.location_id else "",
    }
    if usage:
        event_data["usage"] = self._convert_usage_to_json_serializable(usage)

    # Emit inside a tracking context carrying the course_id when we have
    # one, so downstream xAPI transformers can resolve the course; the
    # intermediate context dict and second course_id check were redundant.
    if self.course_id:
        with tracker.get_tracker().context("ai_workflow", {"course_id": str(self.course_id)}):
            tracker.emit(event_name, event_data)
    else:
        tracker.emit(event_name, event_data)
3784

3885
def run(self, input_data):
    """Execute the workflow for *input_data*; concrete orchestrators must override."""
    raise NotImplementedError("Subclasses must implement run method")

backend/openedx_ai_extensions/workflows/orchestrators/direct_orchestrator.py

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -70,17 +70,13 @@ def run(self, input_data):
7070
}
7171

7272
# 6. Emit completed event for one-shot workflow
73-
self._emit_workflow_event(EVENT_NAME_WORKFLOW_COMPLETED)
73+
self._emit_workflow_event(EVENT_NAME_WORKFLOW_COMPLETED, usage=llm_result.get('usage', None))
7474

7575
# --- 7. Return Structured Non-Streaming Result ---
7676
# If execution reaches this point, we have a successful, non-streaming result (Dict).
7777
response_data = {
7878
'response': llm_result.get('response', 'No response available'),
7979
'status': 'completed',
80-
'metadata': {
81-
'tokens_used': llm_result.get('tokens_used'),
82-
'model_used': llm_result.get('model_used')
83-
}
8480
}
8581
return response_data
8682

@@ -199,14 +195,10 @@ def run(self, input_data):
199195
"collection_id": collection_key,
200196
}
201197
self.session.save(update_fields=["metadata"])
202-
self._emit_workflow_event(EVENT_NAME_WORKFLOW_COMPLETED)
198+
self._emit_workflow_event(EVENT_NAME_WORKFLOW_COMPLETED, usage=llm_result.get('usage', None))
203199
return {
204200
'response': f"authoring/library/{lib_key_str}/collection/{collection_key}",
205-
'status': 'completed',
206-
'metadata': {
207-
'tokens_used': llm_result.get('tokens_used'),
208-
'model_used': llm_result.get('model_used')
209-
}
201+
'status': 'completed'
210202
}
211203

212204
# Iterative path: store questions for review

backend/openedx_ai_extensions/workflows/orchestrators/flashcards_orchestrator.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ def run(self, input_data):
107107
'status': 'LLMProcessor error'
108108
}
109109

110-
self._emit_workflow_event(EVENT_NAME_WORKFLOW_COMPLETED)
110+
self._emit_workflow_event(EVENT_NAME_WORKFLOW_COMPLETED, usage=llm_result.get('usage', None))
111111

112112
response_obj = llm_result.get('response')
113113
cards = self._get_structured_cards(response_obj)
@@ -122,10 +122,6 @@ def run(self, input_data):
122122
response_data = {
123123
'response': cards,
124124
'status': 'completed',
125-
'metadata': {
126-
'tokens_used': llm_result.get('tokens_used'),
127-
'model_used': llm_result.get('model_used')
128-
}
129125
}
130126
return response_data
131127

backend/openedx_ai_extensions/workflows/orchestrators/threaded_orchestrator.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -184,16 +184,12 @@ def run(self, input_data):
184184

185185
# Emit appropriate event based on interaction state
186186
if is_first_interaction:
187-
self._emit_workflow_event(EVENT_NAME_WORKFLOW_INITIALIZED)
187+
self._emit_workflow_event(EVENT_NAME_WORKFLOW_INITIALIZED, usage=llm_result.get("usage", None))
188188
else:
189-
self._emit_workflow_event(EVENT_NAME_WORKFLOW_INTERACTED)
189+
self._emit_workflow_event(EVENT_NAME_WORKFLOW_INTERACTED, usage=llm_result.get("usage", None))
190190

191191
# 4. Return result
192192
return {
193193
"response": llm_result.get("response", "No response available"),
194194
"status": "completed",
195-
"metadata": {
196-
"tokens_used": llm_result.get("tokens_used"),
197-
"model_used": llm_result.get("model_used"),
198-
},
199195
}

backend/openedx_ai_extensions/xapi/constants.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
XAPI_EXTENSION_WORKFLOW_ACTION = "https://w3id.org/xapi/openedx/extension/ai-workflow-action"
2323
XAPI_EXTENSION_PROMPT_TEMPLATE_SLUG = "https://w3id.org/xapi/openedx/extension/prompt-template-slug"
2424
XAPI_EXTENSION_LOCATION_ID = "https://w3id.org/xapi/openedx/extension/location-id"
25+
XAPI_EXTENSION_USAGE = "https://w3id.org/xapi/openedx/extension/ai-usage"
2526

2627
# Event names
2728
EVENT_NAME_WORKFLOW_INITIALIZED = "openedx.ai.workflow.initialized"

backend/openedx_ai_extensions/xapi/transformers.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,11 @@ def get_object(self) -> Activity:
5353
if location_id:
5454
extensions[constants.XAPI_EXTENSION_LOCATION_ID] = location_id
5555

56+
# Add LLM usage data (token counts, etc.) if available
57+
usage: Optional[dict] = self.get_data("usage")
58+
if usage:
59+
extensions[constants.XAPI_EXTENSION_USAGE] = usage
60+
5661
return Activity(
5762
id=self.get_object_iri("ai_workflow", "__".join([profile_name, action])),
5863
definition=ActivityDefinition(

0 commit comments

Comments
 (0)