|
1 | | -from api.nodes.fetch_summary_node import Node |
2 | 1 | import logging |
| 2 | +import json |
| 3 | +from api.nodes.fetch_summary_node import Node |
| 4 | +from api.config import OPENAI_MODEL |
| 5 | + |
3 | 6 | logging.basicConfig(level=logging.INFO) |
4 | 7 | logger = logging.getLogger(__name__) |
5 | 8 |
|
| 9 | +# Prompt quotas per category (total 9 slots) |
| 10 | +QUOTAS = {"Marketing": 3, "Sales": 2, "Product": 2, "Success": 1, "Ops": 1} |
| 11 | + |
6 | 12 | @Node(retries=3) |
7 | 13 | def PromptDraftNode(text: str, framework_plan: dict) -> dict[str, list[str]]: |
8 | 14 | """ |
9 | 15 | Draft quota-governed raw prompts with explicit constraints and strong business anchoring.
10 | 16 | `framework_plan` is serialized to JSON and passed verbatim to the model for grounding.
11 | 17 | """ |
12 | | - import json, hashlib, logging, openai |
13 | | - |
14 | | - logger = logging.getLogger(__name__) |
15 | | - key_phrases: list[str] = framework_plan.get("key_phrases", []) |
16 | | - min_phrases_required = 2 if key_phrases else 1 # fallback if none |
| 18 | + import openai, hashlib |
17 | 19 |
|
| 20 | + # Framework plan may include quotas and capsule context |
18 | 21 | client = openai.OpenAI() |
19 | 22 |
|
| 23 | + # System prompt for generating prompt packs grounded in business capsule |
20 | 24 | system_msg = { |
21 | 25 | "role": "system", |
22 | 26 | "content": ( |
23 | | - "Draft 10-25 AI prompts grouped by business function. " |
24 | | - "Return ONLY valid JSON shaped as:\n" |
25 | | - "{\n" |
26 | | - " \"Marketing\": [\"You are a ...\", ...],\n" |
27 | | - " \"Sales\": [...],\n" |
28 | | - " \"Success\": [...],\n" |
29 | | - " \"Product\": [...],\n" |
30 | | - " \"Ops\": [...]\n" |
31 | | - "}\n" |
32 | | - f"Rules: • each prompt begins with \"You are a ...\" • min {min_phrases_required} key-phrases " |
33 | | - "• ≤220 tokens • quotas: Marketing 3, Sales 2, Success 2, Product 2, Ops 1." |
| 27 | + "You are a Prompt-Pack Generator. Given a Business Context Capsule and a framework plan, " |
| 28 | +    "generate high-quality prompts grouped by business function, filling each category's quota exactly. "
| 29 | + "Return ONLY valid JSON mapping categories to arrays of prompt strings. " |
| 30 | + "Prompts must be no more than 220 tokens each. " |
| 31 | + f"Quotas per category: {QUOTAS}." |
34 | 32 | ) |
35 | 33 | } |
36 | 34 |
|
37 | 35 | user_msg = { |
38 | 36 | "role": "user", |
39 | 37 | "content": ( |
40 | | - f"<business_text>{text}</business_text>\n" |
41 | | - f"<key_phrases>{', '.join(key_phrases)}</key_phrases>\n" |
| 38 | + f"<capsule>{text}</capsule>\n" |
42 | 39 | f"<framework_plan>{json.dumps(framework_plan, ensure_ascii=False)}</framework_plan>" |
43 | 40 | ), |
44 | 41 | } |
45 | 42 |
|
| 43 | +    # Content-derived seed for best-effort repeatability (OpenAI's `seed` is best-effort; with temperature > 0 outputs may still vary)
46 | 44 | seed_val = int( |
47 | | - hashlib.sha256((text + 'gpt-4.1-mini-2025-04-14').encode()).hexdigest(), 16 |
| 45 | + hashlib.sha256((text + OPENAI_MODEL).encode()).hexdigest(), 16 |
48 | 46 | ) % 2**31 |
49 | 47 |
|
50 | 48 | resp = client.chat.completions.create( |
51 | | - model="gpt-4.1-mini-2025-04-14", |
| 49 | + model=OPENAI_MODEL, |
52 | 50 | messages=[system_msg, user_msg], |
53 | | - temperature=0.0, # deterministic |
| 51 | + temperature=0.35, |
54 | 52 | seed=seed_val, |
55 | 53 | ) |
56 | 54 |
|
|
0 commit comments