@@ -402,9 +402,9 @@ def generate_chat_response() -> Generator[str, None, None]:
 
             # Prepare preference markdown string
             if chat_req.include_preference:
-                pref_md_string = self._build_pref_md_string_for_playground(
-                    search_response.data["pref_mem"][0].get("memories", [])
-                )
+                pref_list = search_response.data.get("pref_mem") or []
+                pref_memories = pref_list[0].get("memories", []) if pref_list else []
+                pref_md_string = self._build_pref_md_string_for_playground(pref_memories)
                 yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n"
 
             # Step 2: Build system prompt with memories
@@ -425,8 +425,6 @@ def generate_chat_response() -> Generator[str, None, None]:
                 f"current_system_prompt: {system_prompt}"
             )
 
-            yield f"data: {json.dumps({'type': 'status', 'data': '2'})}\n\n"
-
             # Step 3: Generate streaming response from LLM
             if (
                 chat_req.model_name_or_path
@@ -448,9 +446,11 @@ def generate_chat_response() -> Generator[str, None, None]:
             for chunk in response_stream:
                 if chunk == "<think>":
                     in_think = True
+                    yield f"data: {json.dumps({'type': 'status', 'data': 'reasoning'})}\n\n"
                     continue
                 if chunk == "</think>":
                     in_think = False
+                    yield f"data: {json.dumps({'type': 'status', 'data': '2'})}\n\n"
                     continue
 
                 if in_think:
@@ -564,17 +564,17 @@ def _build_pref_md_string_for_playground(self, pref_mem_list: list[any]) -> str:
         explicit = []
         implicit = []
         for pref_mem in pref_mem_list:
-            if pref_mem["metadata"]["preference_type"] == "explicit":
+            if pref_mem["metadata"]["preference_type"] == "explicit_preference":
                 explicit.append(
                     {
-                        "content": pref_mem["preference"],
+                        "content": pref_mem["metadata"]["preference"],
                         "reasoning": pref_mem["metadata"]["reasoning"],
                     }
                 )
-            elif pref_mem["metadata"]["preference_type"] == "implicit":
+            elif pref_mem["metadata"]["preference_type"] == "implicit_preference":
                 implicit.append(
                     {
-                        "content": pref_mem["preference"],
+                        "content": pref_mem["metadata"]["preference"],
                         "reasoning": pref_mem["metadata"]["reasoning"],
                     }
                 )
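
For context, a minimal sketch of the defensive lookup that the new `pref_list` / `pref_memories` lines perform; the sample payloads below are illustrative assumptions rather than the service's actual response schema:

```python
# Sketch of the guarded preference lookup introduced in this diff.
# The sample payloads are assumptions for illustration only.
def extract_pref_memories(data: dict) -> list:
    pref_list = data.get("pref_mem") or []
    return pref_list[0].get("memories", []) if pref_list else []

# The previous direct indexing raised KeyError/IndexError on these inputs;
# the guarded version falls back to an empty list instead.
assert extract_pref_memories({}) == []
assert extract_pref_memories({"pref_mem": []}) == []
assert extract_pref_memories({"pref_mem": [{"memories": [{"id": 1}]}]}) == [{"id": 1}]
```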