"""
Unified Example: LazyLLM Integration Demo
=========================================

This example merges functionality from:
1. Example 1: Conversation Memory Processing
2. Example 2: Skill Extraction
3. Example 3: Multimodal Processing

It demonstrates how to use the LazyLLM backend for:
- Processing conversation history
- Extracting technical skills from logs
- Handling multimodal content (images + text)

The default source and models are Qwen.

Usage:
    export MEMU_QWEN_API_KEY=your_api_key
    python examples/example_5_with_lazyllm_client.py
"""

import asyncio
import os
import sys
from pathlib import Path

# Add src to sys.path FIRST before importing memu
project_root = Path(__file__).parent.parent
src_path = str(project_root / "src")
if src_path not in sys.path:
    sys.path.insert(0, src_path)

from memu.app import MemoryService  # noqa: E402

# ==========================================
# PART 1: Conversation Memory Processing
# ==========================================


async def run_conversation_memory_demo(service):
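    """Memorize a set of JSON conversation files and write per-category Markdown output."""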
    print("\n" + "=" * 60)
    print("PART 1: Conversation Memory Processing")
    print("=" * 60)

    conversation_files = [
        "examples/resources/conversations/conv1.json",
        "examples/resources/conversations/conv2.json",
        "examples/resources/conversations/conv3.json",
    ]

    total_items = 0
    categories = []

    for conv_file in conversation_files:
        if not os.path.exists(conv_file):
            print(f"⚠ File not found: {conv_file}")
            continue

        try:
            print(f"  Processing: {conv_file}")
            result = await service.memorize(resource_url=conv_file, modality="conversation")
            total_items += len(result.get("items", []))
            # Assumes each memorize call returns the full, updated category
            # snapshot, so only the latest one is kept
            categories = result.get("categories", [])
            print(f"  ✓ Extracted {len(result.get('items', []))} items")
        except Exception as e:
            print(f"  ✗ Error processing {conv_file}: {e}")

    # Output generation
    output_dir = "examples/output/lazyllm_example/conversation"
    os.makedirs(output_dir, exist_ok=True)
    await generate_markdown_output(categories, output_dir)
    print(f"✓ Conversation processing complete ({total_items} items). Output: {output_dir}")


# ==========================================
# PART 2: Skill Extraction
# ==========================================


async def run_skill_extraction_demo(service):
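    """Switch the service to skill extraction and mine agent execution logs for skills."""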
    print("\n" + "=" * 60)
    print("PART 2: Skill Extraction from Logs")
    print("=" * 60)

    # Configure prompt for skill extraction
    skill_prompt = """
    You are analyzing an agent execution log. Extract the key actions taken, their outcomes, and lessons learned.

    Output MUST be valid XML wrapped in <skills> tags.
    Format:
    <skills>
        <memory>
            <content>
            [Action] Description...
            [Lesson] Key lesson...
            </content>
            <categories>
                <category>Category Name</category>
            </categories>
        </memory>
    </skills>

    Text: {resource}
    """

    # Update service config for skill extraction
    service.memorize_config.memory_types = ["skill"]
    service.memorize_config.memory_type_prompts = {"skill": skill_prompt}

    logs = [
        "examples/resources/logs/log1.txt",
        "examples/resources/logs/log2.txt",
        "examples/resources/logs/log3.txt",
    ]

    all_skills = []
    for log_file in logs:
        if not os.path.exists(log_file):
            continue

        print(f"  Processing log: {log_file}")
        try:
            result = await service.memorize(resource_url=log_file, modality="document")
            for item in result.get("items", []):
                if item.get("memory_type") == "skill":
                    all_skills.append(item.get("summary", ""))
            print(f"  ✓ Extracted {len(result.get('items', []))} items")
        except Exception as e:
            print(f"  ✗ Error: {e}")

    # Generate summary guide
    if all_skills:
        output_file = "examples/output/lazyllm_example/skills/skill_guide.md"
        await generate_skill_guide(all_skills, service, output_file)
        print(f"✓ Skill guide generated: {output_file}")


# ==========================================
# PART 3: Multimodal Memory
# ==========================================


async def run_multimodal_demo(service):
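    """Switch the service to knowledge extraction and process a document and an image."""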
    print("\n" + "=" * 60)
    print("PART 3: Multimodal Memory Processing")
    print("=" * 60)

    # Configure for knowledge extraction
    xml_prompt = """
    Analyze content and extract key information.
    Output MUST be valid XML wrapped in <knowledge> tags.
    Format:
    <knowledge>
        <memory>
            <content>Extracted content...</content>
            <categories><category>category_name</category></categories>
        </memory>
    </knowledge>

    Content: {resource}
    """

    service.memorize_config.memory_types = ["knowledge"]
    service.memorize_config.memory_type_prompts = {"knowledge": xml_prompt}

    resources = [
        ("examples/resources/docs/doc1.txt", "document"),
        ("examples/resources/images/image1.png", "image"),
    ]

    categories = []
    for res_file, modality in resources:
        if not os.path.exists(res_file):
            continue

        print(f"  Processing {modality}: {res_file}")
        try:
            result = await service.memorize(resource_url=res_file, modality=modality)
            categories = result.get("categories", [])
            print(f"  ✓ Extracted {len(result.get('items', []))} items")
        except Exception as e:
            print(f"  ✗ Error: {e}")

    output_dir = "examples/output/lazyllm_example/multimodal"
    os.makedirs(output_dir, exist_ok=True)
    await generate_markdown_output(categories, output_dir)
    print(f"✓ Multimodal processing complete. Output: {output_dir}")


# ==========================================
# Helpers
# ==========================================


async def generate_markdown_output(categories, output_dir):
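    """Write one Markdown file per category, using the category's summary as the body."""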
    for cat in categories:
        name = cat.get("name", "unknown")
        summary = cat.get("summary", "")
        if not summary:
            continue

        with open(os.path.join(output_dir, f"{name}.md"), "w", encoding="utf-8") as f:
            f.write(f"# {name.replace('_', ' ').title()}\n\n")
            # Strip any leftover <content> tags from the LLM output
            cleaned = summary.replace("<content>", "").replace("</content>", "").strip()
            f.write(cleaned)


async def generate_skill_guide(skills, service, output_file):
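    """Ask the LLM (via LazyLLM) to condense the extracted skills into a single guide."""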
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    skills_text = "\n\n".join(skills)
    prompt = f"Summarize these skills into a guide:\n\n{skills_text}"

    # Use LazyLLM via service
    summary = await service.llm_client.summarize(text=prompt)

    with open(output_file, "w", encoding="utf-8") as f:
        f.write(summary)


# ==========================================
# Main Entry
# ==========================================


async def main():
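    """Initialize a shared LazyLLM-backed MemoryService and run the demos."""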
    print("Unified LazyLLM Example")
    print("=" * 60)

    # 1. Initialize Shared Service
    service = MemoryService(
        llm_profiles={
            "default": {
                "client_backend": "lazyllm_backend",
                "chat_model": "qwen3-max",
                "embed_model": "text-embedding-v3",
                "lazyllm_source": {
                    "source": "qwen",
                    "llm_source": "qwen",
                    "vlm_source": "qwen",
                    "embed_source": "qwen",
                    "stt_source": "qwen",
                    "vlm_model": "qwen-vl-plus",
                    "stt_model": "qwen-audio-turbo",
                },
            },
        }
    )
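
    # NOTE: lazyllm_source selects the provider per modality (LLM, VLM,
    # embedding, STT). Everything above points at Qwen; the API key is
    # expected in MEMU_QWEN_API_KEY (see the module docstring).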

    # 2. Run Demos (uncomment the other parts to run all three)
    await run_conversation_memory_demo(service)
    # await run_skill_extraction_demo(service)
    # await run_multimodal_demo(service)


if __name__ == "__main__":
    asyncio.run(main())