Skip to content

Commit 0a510e6

Browse files
committed
refactor: query_enrichment_chain 만들고 context_enrichment_node에 연결
1 parent 4b41c96 commit 0a510e6

File tree

2 files changed

+34
-29
lines changed

2 files changed

+34
-29
lines changed

llm_utils/chains.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,6 +135,32 @@ def create_query_refiner_with_profile_chain(llm):
135135
)
136136

137137

138+
def create_query_enrichment_chain(llm):
    """Build a runnable that enriches a refined user question with context.

    The chain feeds three inputs — ``refined_question``, ``profiles`` and
    ``related_tables`` — into a fixed prompt and pipes it into *llm*.

    Args:
        llm: A LangChain-compatible chat model the prompt is piped into.

    Returns:
        A runnable (prompt | llm) that emits the enriched question as a
        model message.
    """
    # The template text is kept verbatim; .strip() removes only the
    # leading/trailing newlines introduced by the triple-quoted literal.
    template_text = """
You are a smart assistant that takes a user question and enriches it using:
1. Question profiles: {profiles}
2. Table metadata (names, columns, descriptions):
{related_tables}

Tasks:
- Correct any wrong terms by matching them to actual column names.
- If the question is time-series or aggregation, add explicit hints (e.g., "over the last 30 days").
- If needed, map natural language terms to actual column values (e.g., ‘미국’ → ‘USA’ for country_code).
- Output the enriched question only.

Refined question:
{refined_question}

Using the refined version for enrichment, but keep original intent in mind.
""".strip()

    prompt = PromptTemplate(
        template=template_text,
        input_variables=["refined_question", "profiles", "related_tables"],
    )
    return prompt | llm
162+
163+
138164
def create_profile_extraction_chain(llm):
    """Build a runnable that extracts a structured question profile.

    Args:
        llm: A LangChain-compatible chat model supporting structured output.

    Returns:
        A runnable (profile_prompt | llm) whose output is constrained to
        the ``QuestionProfile`` schema.
    """
    # with_structured_output forces the model response into QuestionProfile.
    return profile_prompt | llm.with_structured_output(QuestionProfile)
@@ -144,6 +170,7 @@ def create_profile_extraction_chain(llm):
144170
query_maker_chain = create_query_maker_chain(llm)
145171
profile_extraction_chain = create_profile_extraction_chain(llm)
146172
query_refiner_with_profile_chain = create_query_refiner_with_profile_chain(llm)
173+
query_enrichment_chain = create_query_enrichment_chain(llm)
147174

148175
if __name__ == "__main__":
149176
query_refiner_chain.invoke()

llm_utils/graph.py

Lines changed: 7 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
query_maker_chain,
1414
query_refiner_with_profile_chain,
1515
profile_extraction_chain,
16+
query_enrichment_chain,
1617
)
1718

1819
from llm_utils.tools import get_info_from_db
@@ -142,36 +143,13 @@ def context_enrichment_node(state: QueryMakerState):
142143
question_profile = state["question_profile"].model_dump()
143144
question_profile_json = json.dumps(question_profile, ensure_ascii=False, indent=2)
144145

145-
from langchain.prompts import PromptTemplate
146-
147-
enrichment_prompt = PromptTemplate(
148-
input_variables=["refined_question", "profiles", "related_tables"],
149-
template="""
150-
You are a smart assistant that takes a user question and enriches it using:
151-
1. Question profiles: {profiles}
152-
2. Table metadata (names, columns, descriptions):
153-
{related_tables}
154-
155-
Tasks:
156-
- Correct any wrong terms by matching them to actual column names.
157-
- If the question is time-series or aggregation, add explicit hints (e.g., "over the last 30 days").
158-
- If needed, map natural language terms to actual column values (e.g., ‘미국’ → ‘USA’ for country_code).
159-
- Output the enriched question only.
160-
161-
Refined question:
162-
{refined_question}
163-
164-
Using the refined version for enrichment, but keep original intent in mind.
165-
""".strip(),
166-
)
167-
168-
llm = get_llm()
169-
prompt = enrichment_prompt.format_prompt(
170-
refined_question=state["refined_input"],
171-
profiles=question_profile_json,
172-
related_tables=searched_tables_json,
146+
enriched_text = query_enrichment_chain.invoke(
147+
input={
148+
"refined_question": state["refined_input"],
149+
"profiles": question_profile_json,
150+
"related_tables": searched_tables_json,
151+
}
173152
)
174-
enriched_text = llm.invoke(prompt.to_messages())
175153

176154
state["refined_input"] = enriched_text
177155
state["messages"].append(enriched_text)

0 commit comments

Comments (0)