|
| 1 | +from pydantic import BaseModel, Field |
| 2 | + |
| 3 | +from memu.prompts.memory_type import DEFAULT_MEMORY_TYPES |
| 4 | +from memu.prompts.memory_type import PROMPTS as DEFAULT_MEMORY_TYPE_PROMPTS |
| 5 | + |
| 6 | + |
def _default_memory_types() -> list[str]:
    """Return an independent copy of the built-in memory type ordering."""
    # Copy so pydantic instances never share (or mutate) the module-level default.
    return [*DEFAULT_MEMORY_TYPES]
| 9 | + |
| 10 | + |
def _default_memory_type_prompts() -> dict[str, str]:
    """Return an independent copy of the built-in memory-type prompt mapping."""
    # Copy so pydantic instances never share (or mutate) the module-level default.
    return {**DEFAULT_MEMORY_TYPE_PROMPTS}
| 13 | + |
| 14 | + |
| 15 | +def _default_memory_categories() -> list[dict[str, str]]: |
| 16 | + return [ |
| 17 | + {"name": "personal_info", "description": "Personal information about the user"}, |
| 18 | + {"name": "preferences", "description": "User preferences, likes and dislikes"}, |
| 19 | + {"name": "relationships", "description": "Information about relationships with others"}, |
| 20 | + {"name": "activities", "description": "Activities, hobbies, and interests"}, |
| 21 | + {"name": "goals", "description": "Goals, aspirations, and objectives"}, |
| 22 | + {"name": "experiences", "description": "Past experiences and events"}, |
| 23 | + {"name": "knowledge", "description": "Knowledge, facts, and learned information"}, |
| 24 | + {"name": "opinions", "description": "Opinions, viewpoints, and perspectives"}, |
| 25 | + {"name": "habits", "description": "Habits, routines, and patterns"}, |
| 26 | + {"name": "work_life", "description": "Work-related information and professional life"}, |
| 27 | + ] |
| 28 | + |
| 29 | + |
class AppSettings(BaseModel):
    """Application-wide runtime configuration.

    Groups storage locations, OpenAI connectivity, model selection,
    scoring thresholds, and the default prompts/categories used for
    memory extraction and summarization.
    """

    # Directory where raw resources are persisted on disk.
    resources_dir: str = "./resources"

    # OpenAI-compatible endpoint and the env var that holds the API key.
    openai_base: str = "https://api.openai.com/v1"
    openai_api_key_env: str = "OPENAI_API_KEY"

    # Model identifiers for chat completions and embeddings.
    chat_model: str = "gpt-4o-mini"
    embed_model: str = "text-embedding-3-small"
    llm_client_backend: str = Field(
        default="httpx",
        description="Which OpenAI client backend to use: 'httpx' (httpx) or 'sdk' (official OpenAI).",
    )
    llm_http_provider: str = Field(
        default="openai",
        description="Name of the HTTP LLM provider implementation (e.g. 'openai').",
    )
    llm_http_endpoints: dict[str, str] = Field(
        default_factory=dict,
        description="Optional overrides for HTTP endpoints (keys: 'chat'/'summary', 'embeddings'/'embed').",
    )

    # Minimum similarity score required to assign a memory to a category.
    category_assign_threshold: float = 0.25

    # Summarization prompt used when no per-modality override is given.
    default_summary_prompt: str = "Summarize the text in one short paragraph."
    summary_prompts: dict[str, str] = Field(
        default_factory=dict,
        description="Optional mapping of modality -> summary system prompt.",
    )
    memory_categories: list[dict[str, str]] = Field(
        default_factory=_default_memory_categories,
        description="Global memory category definitions embedded at service startup.",
    )
    category_summary_target_length: int = Field(
        default=400,
        description="Target max length for auto-generated category summaries.",
    )
    memory_types: list[str] = Field(
        default_factory=_default_memory_types,
        description="Ordered list of memory types (profile/event/knowledge/behavior by default).",
    )
    memory_type_prompts: dict[str, str] = Field(
        default_factory=_default_memory_type_prompts,
        description="System prompt overrides for each memory type extraction.",
    )
0 commit comments