Skip to content

Commit 339962f

Browse files
committed
feat: UI component consistency through conversation
1 parent 8b93009 commit 339962f

File tree

14 files changed

+2489
-16
lines changed

14 files changed

+2489
-16
lines changed

libs/next_gen_ui_agent/agent.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,9 @@ async def component_selection(
126126
)
127127

128128
input_to_dynamic_selection = AgentInput(
129-
user_prompt=input["user_prompt"], input_data=to_dynamic_selection
129+
user_prompt=input["user_prompt"],
130+
input_data=to_dynamic_selection,
131+
previous_user_prompts=input.get("previous_user_prompts"),
130132
)
131133
from_dynamic_selection = (
132134
await self._component_selection_strategy.select_components(

libs/next_gen_ui_agent/component_selection.py

Lines changed: 40 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -15,16 +15,16 @@
1515
from pydantic_core import from_json
1616

1717
ui_components_description_supported = """
18-
* one-card - component to visualize multiple fields from one-item data. One image can be shown if url is available together with other fields. Array of simple values from one-item data can be shown as a field. Array of objects can't be shown as a field.
19-
* video-player - component to play video from one-item data. Videos like trailers, promo videos. Data must contain url pointing to the video to be shown, e.g. https://www.youtube.com/watch?v=v-PjgYDrg70
20-
* image - component to show one image from one-item data. Images like posters, covers, pictures. Do not use for video! Select it if no other fields are necessary to be shown. Data must contain url pointing to the image to be shown, e.g. https://www.images.com/v-PjgYDrg70.jpeg
18+
- 'one-card': component to show multiple fields from one-item data. One image can be shown if url is available together with other fields. Array of simple values from one-item data can be shown as a field. Array of objects can't be shown as a field.
19+
- 'video-player': component to play video from one-item data. Videos like trailers, promo videos. Data must contain url pointing to the video to be shown, e.g. https://www.youtube.com/watch?v=v-PjgYDrg70
20+
- 'image': component to show one image from one-item data. Images like posters, covers, pictures. Do not use for video! Do not use if other fields has to be shown, use `one-card` instead! Data must contain url pointing to the image to be shown, e.g. https://www.images.com/v-PjgYDrg70.jpeg
2121
"""
2222

2323
ui_components_description_all = (
2424
ui_components_description_supported
2525
+ """
26-
* table - component to visualize array of objects with more than 6 items and small number of shown fields with short values.
27-
* set-of-cards - component to visualize array of objects with less than 6 items, or high number of shown fields and fields with long values.
26+
- 'table': component to show array of objects with more than 6 items and small number of shown fields with short values.
27+
- 'set-of-cards': component to show array of objects with less than 6 items, or high number of shown fields and fields with long values.
2828
""".strip()
2929
)
3030

@@ -64,7 +64,12 @@ async def select_components(
6464
logger.debug("---CALL component_selection---")
6565
components = await asyncio.gather(
6666
*[
67-
self.component_selection_run(inference, input["user_prompt"], data)
67+
self.component_selection_run(
68+
inference,
69+
input["user_prompt"],
70+
data,
71+
input.get("previous_user_prompts"),
72+
)
6873
for data in input["input_data"]
6974
]
7075
)
@@ -79,6 +84,7 @@ async def perform_inference(
7984
user_prompt: str,
8085
json_data: Any,
8186
input_data_id: str,
87+
previous_user_prompts: list[str] | None = None,
8288
) -> list[str]:
8389
"""Run Component Selection inference."""
8490

@@ -89,17 +95,23 @@ async def perform_inference(
8995
# logger.debug(user_prompt)
9096
# logger.debug(input_data)
9197

92-
sys_msg_content = f"""You are helpful and advanced user interface design assistant. Based on the "User query" and JSON formatted "Data", select the best UI component to visualize the "Data" to the user.
98+
sys_msg_content = """You are helpful and advanced user interface design assistant. Based on the "User query" and JSON formatted "Data", select the best UI component to visualize the "Data" to the user.
9399
Generate response in the JSON format only. Select one component only into "component".
94100
Provide the title for the component in "title".
95101
Provide reason for the component selection in the "reasonForTheComponentSelection".
96102
Provide your confidence for the component selection as a percentage in the "confidenceScore".
97103
Provide list of "fields" to be visualized in the UI component. Select only relevant data fields to be presented in the component. Do not bloat presentation. Show all the important info about the data item. Mainly include information the user asks for in User query.
98104
If the selected UI component requires specific fields mentioned in its description, provide them. Provide "name" for every field.
99105
For every field provide "data_path" containing JSONPath to get the value from the Data. Do not use any formatting or calculation in the "data_path".
106+
"""
100107

101-
Select one from there UI components: {get_ui_components_description(self.unsupported_components)}
102-
"""
108+
sys_msg_content += f"""
109+
Select one from these UI components: {get_ui_components_description(self.unsupported_components)}
110+
"""
111+
if previous_user_prompts and len(previous_user_prompts) > 0:
112+
sys_msg_content += """
113+
Generate response for the current "User query" and "Data". Keep current UI component and its configuration consistent with the expected UI component generated previously for the "Previous user queries" if applicable for the current "User query" and "Data".
114+
"""
103115

104116
sys_msg_content += """
105117
Response example for multi-item data:
@@ -129,12 +141,24 @@ async def perform_inference(
129141
# we have to parse JSON data to reduce arrays
130142
data = reduce_arrays(json_data, 6)
131143

132-
prompt = f"""=== User query ===
133-
{user_prompt}
144+
prompt = f"""User query:
145+
{user_prompt}
146+
"""
147+
148+
if previous_user_prompts and len(previous_user_prompts) > 0:
149+
prompt += f"""
150+
Previous user queries:
151+
- {"\n- ".join(previous_user_prompts)}
152+
"""
134153

135-
=== Data ===
136-
{str(data)}
137-
"""
154+
prompt += f"""
155+
Data:
156+
{str(data)}
157+
"""
158+
159+
# TODO remove this after testing
160+
# if previous_user_prompts and len(previous_user_prompts) > 0:
161+
# print("prompt:", prompt)
138162

139163
logger.debug("LLM system message:\n%s", sys_msg_content)
140164
logger.debug("LLM prompt:\n%s", prompt)
@@ -162,6 +186,7 @@ async def component_selection_run(
162186
inference: InferenceBase,
163187
user_prompt: str,
164188
input_data: InputData,
189+
previous_user_prompts: list[str] | None = None,
165190
) -> UIComponentMetadata:
166191
"""Run Component Selection task."""
167192

@@ -173,7 +198,7 @@ async def component_selection_run(
173198
json_data = wrap_json_data(json_data, input_data.get("type"))
174199

175200
inference_output = await self.perform_inference(
176-
inference, user_prompt, json_data, input_data_id
201+
inference, user_prompt, json_data, input_data_id, previous_user_prompts
177202
)
178203

179204
try:

libs/next_gen_ui_agent/types.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,8 @@ class AgentInput(TypedDict):
7171

7272
user_prompt: str
7373
"""User prompt to be processed."""
74+
previous_user_prompts: NotRequired[list[str] | None]
75+
"""Previous user prompts - used to achieve UI component consistency through the conversation. List starts with the most recent prompt from the histroy and goes to older."""
7476
input_data: list[InputData]
7577
"""Input data to be processed - one or more can be provided."""
7678

tests/ui_consistency_poc/README.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# Research/PoC of the UI component consistency during conversation
2+
3+
[![Module Category](https://img.shields.io/badge/Module%20Category-Testing/Evaluation-darkmagenta)](https://github.com/RedHat-UX/next-gen-ui-agent)
4+
[![Module Status](https://img.shields.io/badge/Module%20Status-Tech%20Preview-orange)](https://github.com/RedHat-UX/next-gen-ui-agent)
5+
6+
This module contains code used for UI component consistency during conversation research/PoC and detailed results
7+
8+
## Provides
9+
10+
* `one_component.py` - runnable code for the one-component consistency research PoC - you can select the LLM at the beginning (uses embedded Llama Stack). Writes out to the console and to files in the `one_component_results/` directory
11+
* `one_component_results/` - directory with results from `one_component.py`. File names are the LLM model used for the run.

tests/ui_consistency_poc/__init__.py

Whitespace-only changes.
Lines changed: 245 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,245 @@
import asyncio
import os

from next_gen_ui_agent import NextGenUIAgent
from next_gen_ui_agent.types import InputData
from next_gen_ui_llama_stack_embedded import init_inference_from_env

# Flip this toggle to switch between the local Ollama setup and the Gemini API setup.
if True:
    # Ollama models (embedded Llama Stack):
    LLAMASTACK_CONFIG_PATH_DEFAULT = "tests/ai_eval_components/llamastack-ollama.yaml"
    # Other options tried: "granite3.2:2b", "granite3.3:2b", "ollama/llama3.2:latest"
    INFERENCE_MODEL_DEFAULT = "granite3.3:8b"
else:
    # Gemini API models:
    LLAMASTACK_CONFIG_PATH_DEFAULT = "tests/ai_eval_components/llamastack-m.gemini.yaml"
    # Other options tried: "gemini/gemini-2.0-flash", "gemini/gemini-2.5-flash-lite",
    # "gemini/gemini-2.5-flash"
    INFERENCE_MODEL_DEFAULT = "gemini/gemini-2.0-flash-lite"

os.environ["INFERENCE_MODEL"] = INFERENCE_MODEL_DEFAULT

# Text wrapped around the previous-component description when it is injected
# directly into the user prompt (scenario variant 1).
BY_PREVIOUS_COMPONENT_PROMPT_PRFIX = '\nKeep UI component and its configuration consistent with previously shown '
BY_PREVIOUS_COMPONENT_PROMPT_SUFFIX = ' and only update it to match the "User Query" and "Data" if applicable pushing requested info up, otherwise replace it with another one.'

# One-item movie data fed to every scenario.
TOYSTORY_DATA = '''{
  "movie": {
    "languages": [
      "English"
    ],
    "year": 1995,
    "pictureUrl": "https://image.tmdb.org/t/p/w440_and_h660_face/uXDfjJbdP4ijW5hWSBrPrlKpxab.jpg",
    "runtime": 81,
    "movieId": "1",
    "imdb": {
      "votes": 591836,
      "id": "0114709",
      "rating": 8.3
    },
    "countries": [
      "USA",
      "Germany",
      "Czech Republic"
    ],
    "trailerUrl": "https://www.youtube.com/watch?v=v-PjgYDrg70",
    "title": "Toy Story",
    "url": "https://themoviedb.org/movie/862",
    "revenue": 373554033,
    "tmdbId": "862",
    "plot": "A cowboy doll is profoundly threatened and jealous when a new spaceman figure supplants him as top toy in a boy's room.",
    "released": "1995-11-22",
    "budget": 30000000
  },
  "actors": [
    "Jim Varney",
    "Tim Allen",
    "Tom Hanks",
    "Don Rickles"
  ]
}'''

# User prompts exercised by the scenarios below.
PROMPT_MOVIE_INFO = "Show me basic info about Toy Story."
PROMPT_MOVIE_RELEASED = "When was the movie released?"
PROMPT_MOVIE_POSTER = "Show me the poster only."
PROMPT_MOVIE_SUBSCRIPTIONS = "Show me my subscriptions"
def open_results_file():
    """
    Open the results file (for writing) named after ``INFERENCE_MODEL_DEFAULT``.

    Keeps only the part of the model name after the last '/' and adds a ``.txt``
    suffix. If the file exists, it is replaced. The file is created in the
    ``one_component_results`` subfolder relative to this script's directory.

    Returns:
        file: Open file handle for writing.
    """
    # Resolve the results directory next to this script and make sure it exists.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    results_dir = os.path.join(script_dir, 'one_component_results')
    os.makedirs(results_dir, exist_ok=True)

    # Keep only the part after the last '/', e.g. "ollama/llama3.2" -> "llama3.2".
    filename = INFERENCE_MODEL_DEFAULT.split('/')[-1]
    # Bug fix: `filename` was computed but never used — the file was previously
    # opened under a hard-coded literal name instead of the model-derived one.
    full_filename = os.path.join(results_dir, f"{filename}.txt")

    print(f"\nWriting results to {full_filename} ...\n")

    # Open for writing, replacing the file if it exists.
    return open(full_filename, 'w')
# Open the shared results file; print_response/print_header append to it.
# NOTE(review): this runs at module import time (side effect) — consider
# moving it under the __main__ guard.
results_file = open_results_file()
def print_response(title, llm_response):
    """Write the first component-selection result, prefixed by *title*, to stdout and the results file."""
    rendered = title + llm_response[0].model_dump_json(indent=2) + "\n\n"
    print(rendered, end='')
    results_file.write(rendered)
    results_file.flush()
def print_header(title):
    """Write a scenario header, followed by a blank line, to stdout and the results file."""
    rendered = title + "\n\n"
    print(rendered, end='')
    results_file.write(rendered)
    results_file.flush()
if __name__ == "__main__":

    inference = init_inference_from_env(
        default_model=INFERENCE_MODEL_DEFAULT,
        default_config_file=LLAMASTACK_CONFIG_PATH_DEFAULT,
    )

    agent = NextGenUIAgent(config={"inference": inference, "unsupported_components": True})

    # Shared input data for every call — the one-item Toy Story movie entity.
    def _movie_input():
        """Build a fresh input_data list for one agent call."""
        return [InputData({"id": "1", "data": TOYSTORY_DATA})]

    def _select(input):
        """Run one component selection synchronously and return the LLM response."""
        return asyncio.run(agent.component_selection(input=input))

    def run_scenario(header, prompt, previous_component, previous_prompts):
        """Run one consistency scenario in its three variants and print all results.

        Variants:
          1. previous component description injected directly into the user prompt,
          2. conversation history passed via ``previous_user_prompts``,
          3. no history at all (baseline).
        """
        print_header(header)

        # Variant 1: previous component description appended to the prompt.
        llm_response = _select({
            "user_prompt": prompt
            + BY_PREVIOUS_COMPONENT_PROMPT_PRFIX
            + previous_component
            + BY_PREVIOUS_COMPONENT_PROMPT_SUFFIX,
            "input_data": _movie_input(),
        })
        print_response("Response one previous component: ", llm_response)

        # Variant 2: history passed as structured previous_user_prompts.
        llm_response = _select({
            "user_prompt": prompt,
            "previous_user_prompts": previous_prompts,
            "input_data": _movie_input(),
        })
        # Label typo fixed: was "Response multipleprevious prompts: " in scenario 1.
        print_response("Response multiple previous prompts: ", llm_response)

        # Variant 3: baseline with no conversation history.
        llm_response = _select({
            "user_prompt": prompt,
            "input_data": _movie_input(),
        })
        print_response("Response no history: ", llm_response)

    run_scenario(
        "*** 1. Current: Info about Toy Story. Previous: subscriptions table - change of the data entity ***",
        PROMPT_MOVIE_INFO,
        "'table' with title 'My Subscriptions' and fields ['subscription.id', 'subscription.endDate']",
        [PROMPT_MOVIE_SUBSCRIPTIONS],
    )

    run_scenario(
        "*** 2. Current: When was the movie released? Previous: Info about Toy Story - additional info asked about data entity from previous conversation step ***",
        PROMPT_MOVIE_RELEASED,
        "'one-card' with title 'Toy Story' and fields ['movie.title', 'movie.pictureUrl', 'movie.imdb.rating', 'movie.imdb.year']",
        [PROMPT_MOVIE_INFO],
    )

    run_scenario(
        "*** 3. Current: Show me the poster only. Previous: Info about Toy Story - the same data entity, but user prompt asks for different UI component ***",
        PROMPT_MOVIE_POSTER,
        "'one-card' with title 'Toy Story' and fields ['movie.title', 'movie.released', 'movie.imdb.rating']",
        [PROMPT_MOVIE_INFO],
    )

    run_scenario(
        "*** 4. Current: When was the movie released? Previous: Poster only, Info about Toy Story, Subscriptions table - multiple previous conversation steps including entity change and then component type change to another and back ***",
        PROMPT_MOVIE_RELEASED,
        "'image' with title 'Toy Story Poster' and fields ['movie.pictureUrl']",
        [PROMPT_MOVIE_POSTER, PROMPT_MOVIE_INFO, PROMPT_MOVIE_SUBSCRIPTIONS],
    )

0 commit comments

Comments
 (0)