@@ -113,55 +113,28 @@ def create_query_refiner_with_profile_chain(llm):
113113 return tool_choice_prompt | llm
114114
115115
116- from langchain .prompts import PromptTemplate
117-
118- profile_prompt = PromptTemplate (
119- input_variables = ["question" ],
120- template = """
121- You are an assistant that analyzes a user question and extracts the following profiles as JSON:
122- - is_timeseries (boolean)
123- - is_aggregation (boolean)
124- - has_filter (boolean)
125- - is_grouped (boolean)
126- - has_ranking (boolean)
127- - has_temporal_comparison (boolean)
128- - intent_type (one of: trend, lookup, comparison, distribution)
129-
130- Return only valid JSON matching the QuestionProfile schema.
131-
132- Question:
133- {question}
134- """ .strip (),
135- )
136-
137-
def create_query_enrichment_chain(llm):
    """Build an LCEL chain that enriches a refined user question.

    Loads the "query_enrichment_prompt" template, wraps it as the single
    system message of a chat prompt, and pipes that prompt into *llm*.

    Args:
        llm: A LangChain-compatible chat model the prompt is piped into.

    Returns:
        A runnable chain (prompt | llm) producing the enriched question.
    """
    system_template = get_prompt_template("query_enrichment_prompt")

    # Single system message carries the full enrichment instructions.
    enrichment_prompt = ChatPromptTemplate.from_messages(
        [SystemMessagePromptTemplate.from_template(system_template)]
    )

    return enrichment_prompt | llm
162127
163128
def create_profile_extraction_chain(llm):
    """Build an LCEL chain that extracts a structured QuestionProfile.

    Loads the "profile_extraction_prompt" template, wraps it as the single
    system message of a chat prompt, and pipes it into *llm* configured for
    structured output.

    Args:
        llm: A LangChain-compatible chat model supporting
            ``with_structured_output``.

    Returns:
        A runnable chain whose output is parsed into a ``QuestionProfile``.
    """
    system_template = get_prompt_template("profile_extraction_prompt")

    profile_prompt = ChatPromptTemplate.from_messages(
        [SystemMessagePromptTemplate.from_template(system_template)]
    )

    # Structured output forces the model response into the QuestionProfile schema.
    structured_llm = llm.with_structured_output(QuestionProfile)
    return profile_prompt | structured_llm
167140
0 commit comments