import asyncio
import os
from typing import List

# Load environment variables (MOSS_PROJECT_KEY, MOSS_PROJECT_ID, OPENAI_API_KEY)
# from a local .env file *before* the client libraries below are imported, so
# anything that reads the environment at import time sees the values.
from dotenv import load_dotenv
load_dotenv()

from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.tools.moss import MossToolSpec, QueryOptions
from inferedge_moss import MossClient, DocumentInfo
async def main():
    """End-to-end Moss tool demo: index docs, query via a ReAct agent, clean up.

    Requires MOSS_PROJECT_KEY, MOSS_PROJECT_ID and OPENAI_API_KEY in the
    environment (a local .env file is loaded at module import time).
    """
    # 1. Initialize Client — fail fast with a clear message instead of passing
    # None credentials into MossClient and getting an opaque downstream error.
    MOSS_PROJECT_KEY = os.getenv("MOSS_PROJECT_KEY")
    MOSS_PROJECT_ID = os.getenv("MOSS_PROJECT_ID")
    if not MOSS_PROJECT_KEY or not MOSS_PROJECT_ID:
        raise RuntimeError(
            "MOSS_PROJECT_KEY and MOSS_PROJECT_ID must be set (e.g. in a .env file)."
        )
    client = MossClient(project_id=MOSS_PROJECT_ID, project_key=MOSS_PROJECT_KEY)

    # 2. Configure query settings (optional — defaults: top_k=5, alpha=0.5, model_id="moss-minilm")
    query_options = QueryOptions(top_k=5, alpha=0.5, model_id="moss-minilm")

    # 3. Initialize Tool. A single index name is used for both creation and
    # cleanup below — previously the example indexed into "knowledge_base_new"
    # but deleted "knowledge_base", leaking the index it had just created.
    index_name = "knowledge_base_new"
    moss_tool = MossToolSpec(
        client=client,
        index_name=index_name,
        query_options=query_options,
    )

    # 4. List existing indexes before indexing
    print("\n[Step 4] Listing existing indexes...")
    print(await moss_tool.list_indexes())

    # 5. Index Documents
    print("\n[Step 5] Indexing Documents...")
    docs: List[DocumentInfo] = [
        DocumentInfo(
            id="123",
            text="LlamaIndex is a data framework for LLM-based applications.",
            metadata={"source": "docs", "category": "framework"},
        ),
        DocumentInfo(
            id="124",
            text="Moss is a real-time semantic search engine optimized for speed.",
            metadata={"source": "moss_website", "category": "engine"},
        ),
    ]
    await moss_tool.index_docs(docs)
    print(f"Indexed {len(docs)} documents.")

    # 6. List indexes again to confirm creation
    print("\n[Step 6] Listing indexes after indexing...")
    print(await moss_tool.list_indexes())

    # 7. Create agent with all exposed tools (query, list_indexes, delete_index)
    print("\n[Step 7] Creating Agent...")
    llm = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
    agent = ReActAgent(
        tools=moss_tool.to_tool_list(),
        llm=llm,
        verbose=True,
    )

    # 8. Run Agent — natural language query triggers the query tool
    print("\n[Step 8] Querying via Agent...")
    response = await agent.run(user_msg="What is Moss?")
    print("\nAgent Response:")
    print(response)

    # 9. Run Agent — ask it to list available indexes
    print("\n[Step 9] Listing indexes via Agent...")
    response = await agent.run(user_msg="What indexes are available?")
    print("\nAgent Response:")
    print(response)

    # 10. Clean up — delete the index directly (not via agent to avoid
    # accidental deletion). Uses the same index_name the tool was created with.
    print("\n[Step 10] Cleaning up...")
    print(await moss_tool.delete_index(index_name))
62 | 78 |
|
# Script entry point: drive the async example to completion on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
0 commit comments