Skip to content

Commit c03f639

Browse files
uestcsh917-artunknownevan-ak
authored
feat: integrate LazyLLM to provide more llm services (#265)
Co-authored-by: unknown <sunhao3@sensetime.com> Co-authored-by: Wu <evan299792458@outlook.com>
1 parent 603ae12 commit c03f639

File tree

6 files changed

+501
-1
lines changed

6 files changed

+501
-1
lines changed
Lines changed: 250 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,250 @@
1+
"""
2+
Unified Example: LazyLLM Integration Demo
3+
=========================================
4+
5+
This example merges functionalities from:
6+
1. Example 1: Conversation Memory Processing
7+
2. Example 2: Skill Extraction
8+
3. Example 3: Multimodal Processing
9+
10+
It demonstrates how to use the LazyLLM backend for:
11+
- Processing conversation history
12+
- Extracting technical skills from logs
13+
- Handling multimodal content (images + text)
14+
- default source and model are from qwen
15+
16+
Usage:
17+
export MEMU_QWEN_API_KEY=your_api_key
18+
python examples/example_5_with_lazyllm_client.py
19+
"""
20+
21+
import asyncio
22+
import os
23+
import sys
24+
from pathlib import Path
25+
26+
# Add src to sys.path FIRST before importing memu
# (lets the example run from a source checkout without installing the package).
project_root = Path(__file__).parent.parent
src_path = str(project_root / "src")
if src_path not in sys.path:
    # Prepend so the in-repo copy of 'memu' wins over any installed version.
    sys.path.insert(0, src_path)
33+
34+
# ==========================================
35+
# PART 1: Conversation Memory Processing
36+
# ==========================================
37+
38+
39+
async def run_conversation_memory_demo(service):
    """Demo 1: feed conversation JSON files to the memory service and
    render the resulting categories as markdown files."""
    divider = "=" * 60
    print("\n" + divider)
    print("PART 1: Conversation Memory Processing")
    print(divider)

    sources = [f"examples/resources/conversations/conv{i}.json" for i in (1, 2, 3)]

    total_items = 0
    categories = []

    for path in sources:
        # Guard clause: skip (and report) inputs that are not present.
        if not os.path.exists(path):
            print(f"⚠ File not found: {path}")
            continue

        try:
            print(f" Processing: {path}")
            result = await service.memorize(resource_url=path, modality="conversation")
            items = result.get("items", [])
            total_items += len(items)
            # NOTE(review): only the last result's categories are kept —
            # presumably the service returns the cumulative set; confirm.
            categories = result.get("categories", [])
            print(f" ✓ Extracted {len(items)} items")
        except Exception as e:
            print(f" ✗ Error processing {path}: {e}")

    # Output generation
    out_dir = "examples/output/lazyllm_example/conversation"
    os.makedirs(out_dir, exist_ok=True)
    await generate_markdown_output(categories, out_dir)
    print(f"✓ Conversation processing complete. Output: {out_dir}")
72+
73+
74+
# ==========================================
75+
# PART 2: Skill Extraction
76+
# ==========================================
77+
78+
79+
async def run_skill_extraction_demo(service):
    """Demo 2: harvest 'skill' memories from agent execution logs and
    condense them into a single markdown guide."""
    divider = "=" * 60
    print("\n" + divider)
    print("PART 2: Skill Extraction from Logs")
    print(divider)

    # Prompt template sent to the LLM; {resource} is filled in by the service.
    skill_prompt = """
You are analyzing an agent execution log. Extract the key actions taken, their outcomes, and lessons learned.

Output MUST be valid XML wrapped in <skills> tags.
Format:
<skills>
<memory>
<content>
[Action] Description...
[Lesson] Key lesson...
</content>
<categories>
<category>Category Name</category>
</categories>
</memory>
</skills>

Text: {resource}
"""

    # Narrow the service to skill extraction only.
    service.memorize_config.memory_types = ["skill"]
    service.memorize_config.memory_type_prompts = {"skill": skill_prompt}

    logs = [f"examples/resources/logs/log{i}.txt" for i in (1, 2, 3)]

    harvested = []
    for path in logs:
        if not os.path.exists(path):
            continue

        print(f" Processing log: {path}")
        try:
            result = await service.memorize(resource_url=path, modality="document")
            items = result.get("items", [])
            harvested.extend(
                item.get("summary", "")
                for item in items
                if item.get("memory_type") == "skill"
            )
            print(f" ✓ Extracted {len(items)} skills")
        except Exception as e:
            print(f" ✗ Error: {e}")

    # Generate summary guide
    if harvested:
        target = "examples/output/lazyllm_example/skills/skill_guide.md"
        await generate_skill_guide(harvested, service, target)
        print(f"✓ Skill guide generated: {target}")
131+
132+
133+
# ==========================================
134+
# PART 3: Multimodal Memory
135+
# ==========================================
136+
137+
138+
async def run_multimodal_demo(service):
    """Demo 3: extract 'knowledge' memories from a text document and an image."""
    divider = "=" * 60
    print("\n" + divider)
    print("PART 3: Multimodal Memory Processing")
    print(divider)

    # Prompt template for knowledge extraction; {resource} filled by the service.
    xml_prompt = """
Analyze content and extract key information.
Output MUST be valid XML wrapped in <knowledge> tags.
Format:
<knowledge>
<memory>
<content>Extracted content...</content>
<categories><category>category_name</category></categories>
</memory>
</knowledge>

Content: {resource}
"""

    service.memorize_config.memory_types = ["knowledge"]
    service.memorize_config.memory_type_prompts = {"knowledge": xml_prompt}

    inputs = [
        ("examples/resources/docs/doc1.txt", "document"),
        ("examples/resources/images/image1.png", "image"),
    ]

    categories = []
    for path, modality in inputs:
        if not os.path.exists(path):
            continue

        print(f" Processing {modality}: {path}")
        try:
            result = await service.memorize(resource_url=path, modality=modality)
            # NOTE(review): categories from the last processed resource win —
            # presumably the service returns the cumulative set; confirm.
            categories = result.get("categories", [])
            print(f" ✓ Extracted {len(result.get('items', []))} items")
        except Exception as e:
            print(f" ✗ Error: {e}")

    out_dir = "examples/output/lazyllm_example/multimodal"
    os.makedirs(out_dir, exist_ok=True)
    await generate_markdown_output(categories, out_dir)
    print(f"✓ Multimodal processing complete. Output: {out_dir}")
183+
184+
185+
# ==========================================
186+
# Helpers
187+
# ==========================================
188+
189+
190+
async def generate_markdown_output(categories, output_dir):
    """Write one '<name>.md' file per category that has a non-empty summary.

    Args:
        categories: List of dicts with 'name' and 'summary' keys.
        output_dir: Existing directory that receives the markdown files.
    """
    for entry in categories:
        name = entry.get("name", "unknown")
        summary = entry.get("summary", "")
        if not summary:
            # Nothing to render for this category.
            continue

        title = name.replace("_", " ").title()
        # Strip the XML <content> wrapper the extraction prompt produces.
        body = summary.replace("<content>", "").replace("</content>", "").strip()
        target = os.path.join(output_dir, f"{name}.md")
        with open(target, "w", encoding="utf-8") as fh:
            fh.write(f"# {title}\n\n")
            fh.write(body)
201+
202+
203+
async def generate_skill_guide(skills, service, output_file):
    """Condense extracted skill summaries into a single markdown guide.

    Args:
        skills: List of skill summary strings to merge into one prompt.
        service: Service whose `llm_client.summarize` produces the guide text.
        output_file: Destination path for the generated markdown file.
    """
    # Fix: os.makedirs("") raises FileNotFoundError when output_file has no
    # directory component, so only create the parent directory when one exists.
    parent = os.path.dirname(output_file)
    if parent:
        os.makedirs(parent, exist_ok=True)

    skills_text = "\n\n".join(skills)
    prompt = f"Summarize these skills into a guide:\n\n{skills_text}"

    # Use LazyLLM via service
    summary = await service.llm_client.summarize(text=prompt)

    with open(output_file, "w", encoding="utf-8") as f:
        f.write(summary)
213+
214+
215+
# ==========================================
216+
# Main Entry
217+
# ==========================================
218+
219+
220+
async def main():
    """Entry point: build one shared LazyLLM-backed service and run the demos."""
    print("Unified LazyLLM Example")
    print("=" * 60)

    # 1. Initialize Shared Service — the default profile routes every model
    # role (chat / vision / embedding / speech-to-text) through qwen.
    qwen_profile = {
        "client_backend": "lazyllm_backend",
        "chat_model": "qwen3-max",
        "embed_model": "text-embedding-v3",
        "lazyllm_source": {
            "source": "qwen",
            "llm_source": "qwen",
            "vlm_source": "qwen",
            "embed_source": "qwen",
            "stt_source": "qwen",
            "vlm_model": "qwen-vl-plus",
            "stt_model": "qwen-audio-turbo",
        },
    }
    service = MemoryService(llm_profiles={"default": qwen_profile})

    # 2. Run Demos
    await run_conversation_memory_demo(service)
    # await run_skill_extraction_demo(service)
    # await run_multimodal_demo(service)


if __name__ == "__main__":
    asyncio.run(main())

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ dependencies = [
2727
"alembic>=1.14.0",
2828
"pendulum>=3.1.0",
2929
"langchain-core>=1.2.7",
30+
"lazyllm>=0.7.3",
3031
]
3132

3233
[build-system]

src/memu/app/service.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,19 @@ def _init_llm_client(self, config: LLMConfig | None = None) -> Any:
117117
endpoint_overrides=cfg.endpoint_overrides,
118118
embed_model=cfg.embed_model,
119119
)
120+
elif backend == "lazyllm_backend":
121+
from memu.llm.lazyllm_client import LazyLLMClient
122+
123+
return LazyLLMClient(
124+
llm_source=cfg.lazyllm_source.llm_source or cfg.lazyllm_source.source,
125+
vlm_source=cfg.lazyllm_source.vlm_source or cfg.lazyllm_source.source,
126+
embed_source=cfg.lazyllm_source.embed_source or cfg.lazyllm_source.source,
127+
stt_source=cfg.lazyllm_source.stt_source or cfg.lazyllm_source.source,
128+
chat_model=cfg.chat_model,
129+
embed_model=cfg.embed_model,
130+
vlm_model=cfg.lazyllm_source.vlm_model,
131+
stt_model=cfg.lazyllm_source.stt_model,
132+
)
120133
else:
121134
msg = f"Unknown llm_client_backend '{cfg.client_backend}'"
122135
raise ValueError(msg)

src/memu/app/settings.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,16 @@ def _default_memory_categories() -> list[CategoryConfig]:
8989
]
9090

9191

92+
class LazyLLMSource(BaseModel):
    """Per-role model-source settings for the 'lazyllm_backend' LLM client.

    Any role-specific source left as None falls back to `source` when the
    client is constructed (see the `or cfg.lazyllm_source.source` logic in
    the service's LLM-client initialization).
    """

    # Fallback source applied when a role-specific source below is None.
    source: str | None = Field(default=None, description="default source for lazyllm client backend")
    # Per-role source overrides: chat LLM, embeddings, vision, speech-to-text.
    llm_source: str | None = Field(default=None, description="LLM source for lazyllm client backend")
    embed_source: str | None = Field(default=None, description="Embedding source for lazyllm client backend")
    vlm_source: str | None = Field(default=None, description="VLM source for lazyllm client backend")
    stt_source: str | None = Field(default=None, description="STT source for lazyllm client backend")
    # Concrete model names for the vision and speech-to-text roles.
    vlm_model: str = Field(default="qwen-vl-plus", description="Vision language model for lazyllm client backend")
    stt_model: str = Field(default="qwen-audio-turbo", description="Speech-to-text model for lazyllm client backend")
100+
101+
92102
class LLMConfig(BaseModel):
93103
provider: str = Field(
94104
default="openai",
@@ -99,8 +109,9 @@ class LLMConfig(BaseModel):
99109
chat_model: str = Field(default="gpt-4o-mini")
100110
client_backend: str = Field(
101111
default="sdk",
102-
description="Which LLM client backend to use: 'httpx' (httpx) or 'sdk' (official OpenAI).",
112+
description="Which LLM client backend to use: 'httpx' (httpx), 'sdk' (official OpenAI), or 'lazyllm_backend' (for more LLM source like Qwen, Doubao, SIliconflow, etc.)",
103113
)
114+
lazyllm_source: LazyLLMSource = Field(default=LazyLLMSource())
104115
endpoint_overrides: dict[str, str] = Field(
105116
default_factory=dict,
106117
description="Optional overrides for HTTP endpoints (keys: 'chat'/'summary').",

0 commit comments

Comments
 (0)