This repository was archived by the owner on Sep 12, 2024. It is now read-only.

Commit 5704430

major updates on cli module
1 parent ee877ef commit 5704430

File tree

2 files changed: +52 -30 lines changed

autollm/serve/cli.py

Lines changed: 34 additions & 20 deletions
@@ -6,17 +6,13 @@
 
 from autollm.auto.llm import AutoLiteLLM
 from autollm.auto.query_engine import AutoQueryEngine
-from autollm.serve.prompts import LLM_BUILDER_SYSTEM_PROMPT
+from autollm.serve.llm_utils import create_custom_llm
 from autollm.utils.document_reading import read_files_as_documents
 
 llama_index.set_global_handler("simple")
 
-llm_builder = AutoLiteLLM.from_defaults(system_prompt=LLM_BUILDER_SYSTEM_PROMPT)
 
-
-def configure_app(
-        openai_api_key, palm_api_key, uploaded_files, webpage_input, what_to_make_area, config_file, emoji,
-        name, description, instruction):
+def create_app(openai_api_key, palm_api_key, what_to_make_area, uploaded_files, webpage_input, config_file):
     global query_engine
     progress = gr.Progress()
 
@@ -26,6 +22,10 @@ def configure_app(
     progress(0.2, desc="Reading files...")
     file_documents = read_files_as_documents(input_files=uploaded_files)
 
+    progress(0.4, desc="Updating LLM...")
+    custom_llm = create_custom_llm(user_prompt=what_to_make_area, config=config_file)
+    emoji, name, description, instruction = update_configurations(custom_llm)
+
     progress(0.8, desc="Configuring app..")
     query_engine = AutoQueryEngine.from_defaults(
         documents=file_documents,
@@ -36,8 +36,22 @@ def configure_app(
 
     # Complete progress
     progress(1.0, desc="Completed")  # Complete progress bar
+    create_preview_output = gr.Textbox("App preview created on the right screen.")
+
+    return create_preview_output, emoji, name, description, instruction
+
+
+def update_configurations(custom_llm):
+    emoji = custom_llm.emoji
+    name = custom_llm.name
+    description = custom_llm.description
+    instruction = custom_llm.instructions
+
+    return gr.Textbox(emoji), gr.Textbox(name), gr.Textbox(description), gr.Textbox(instruction)
+
 
-    return gr.Textbox("App preview created on the right screen.")
+def update_app():
+    pass
 
 
 def predict(message, history):
@@ -89,15 +103,15 @@ def predict(message, history):
         with gr.Accordion(label="Load config file", open=False):
             config_file_upload = gr.File(
                 label="Configurations of LLM, Vector Store..", file_count="single")
-        emoji_input = gr.Textbox(label="Emoji")
-        name_input = gr.Textbox(label="Name")
-        description_input = gr.Textbox(label="Description")
-        instruction_input = gr.TextArea(label="Instructions")
+        emoji = gr.Textbox(label="Emoji")
+        name = gr.Textbox(label="Name")
+        description = gr.Textbox(label="Description")
+        instruction = gr.TextArea(label="Instructions")
         with gr.Row():
            with gr.Column(scale=1, min_width=10):
                placeholder = gr.Button(visible=False, interactive=False)
            with gr.Column(scale=1, min_width=100):
-               create_preview_button_2 = gr.Button("Create Preview", variant="primary")
+               update_preview_button = gr.Button("Update Preview", variant="primary")
        configure_output = gr.Textbox(label="👆 Click `Create Preview` to see preview of the LLM app")
    with gr.Tab("Export"):
        # Controls for 'Export' tab
@@ -121,18 +135,18 @@ def predict(message, history):
        chat_interface = gr.ChatInterface(predict, chatbot=chatbot)
 
    create_preview_button.click(
-       configure_app,
+       create_app,
        inputs=[
-           openai_api_key_input, palm_api_key_input, uploaded_files, webpage_input, what_to_make_area,
-           config_file_upload, emoji_input, name_input, description_input, instruction_input
+           openai_api_key_input, palm_api_key_input, what_to_make_area, uploaded_files, webpage_input,
+           config_file_upload
        ],
-       outputs=[create_preview_output])
+       outputs=[create_preview_output, emoji, name, description, instruction])
 
-   create_preview_button_2.click(
-       configure_app,
+   update_preview_button.click(
+       update_app,
        inputs=[
-           openai_api_key_input, palm_api_key_input, uploaded_files, webpage_input, what_to_make_area,
-           config_file_upload, emoji_input, name_input, description_input, instruction_input
+           openai_api_key_input, palm_api_key_input, what_to_make_area, uploaded_files, webpage_input,
+           config_file_upload, emoji, name, description, instruction
        ],
        outputs=[configure_output],
        scroll_to_output=True)
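
Note: with these changes, a single click of `Create Preview` reads the uploaded documents, generates the app metadata from the user's prompt via `create_custom_llm`, and writes it back into the form through `update_configurations`; the `Update Preview` button is now wired to the new `update_app` stub, which is still a no-op. A minimal sketch of the metadata step outside the Gradio UI (the prompt string below is a placeholder):

from autollm.serve.llm_utils import create_custom_llm

# Placeholder prompt; in the app this comes from the
# "What would you like to make?" textbox.
custom_llm = create_custom_llm(
    user_prompt="A bot that answers questions about my product docs")

# These four fields are what create_app() now returns into the
# emoji / name / description / instruction textboxes via update_configurations().
print(custom_llm.emoji, custom_llm.name)
print(custom_llm.description)
print(custom_llm.instructions)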

autollm/serve/llm_utils.py

Lines changed: 18 additions & 10 deletions
@@ -5,9 +5,13 @@
 
 from autollm import AutoLiteLLM
 
+DEFAULT_LLM_MODEL = "azure/gpt-4-1106"
+DEFAULT_LLM_MAX_TOKENS = 1024
+DEFAULT_LLM_TEMPERATURE = 0.1
+
 
 class CustomLLM(BaseModel):
-    """Data model for custom LLM creation."""
+    """Data model for custom LLM creation from user prompt."""
 
     emoji: str = Field(
         ...,
@@ -52,17 +56,18 @@ class CustomLLM(BaseModel):
 
 
 PROMPT_TEMPLATE_STR = """\
-Enhance the following user prompt for optimal interaction \
-with a custom LLM model. Ensure the revised prompt maintains the \
-original intent, is clear and detailed, and is adapted to the \
-specific context and task mentioned in the user input.
-
-User Input: {user_prompt}
+Your task is to revise the user prompt and create a JSON object \
+in the format of the CustomLLM data model. The JSON object will \
+be used to create a custom LLM model. Ensure the revised prompt \
+maintains the original intent, is clear and detailed, and is \
+adapted to the specific context and task mentioned in the user input.
 
 1. Analyze the basic prompt to understand its primary purpose and context.
 2. Refine the prompt to be clear, detailed, specific, and tailored to the context and task.
 3. Retain the core elements and intent of the original prompt.
 4. Provide an enhanced version of the prompt, ensuring it is optimized for a LLM model interaction.
+
+User prompt: {user_prompt}
 """
 
 
@@ -71,9 +76,12 @@ def create_custom_llm(user_prompt: str, config: Optional[Any] = None) -> CustomL
     if not user_prompt:
         raise ValueError("Please fill in the area of 'What would you like to make?'")
 
-    llm_model = config.get('llm_model', 'azure/gpt-4-1106')
-    llm_max_tokens = config.get('llm_max_tokens', 1024)
-    llm_temperature = config.get('llm_temperature', 0.1)
+    if not config:
+        config = {}
+
+    llm_model = config.get('llm_model', DEFAULT_LLM_MODEL)
+    llm_max_tokens = config.get('llm_max_tokens', DEFAULT_LLM_MAX_TOKENS)
+    llm_temperature = config.get('llm_temperature', DEFAULT_LLM_TEMPERATURE)
     llm_api_base = config.get('llm_api_base', None)
 
     llm = AutoLiteLLM.from_defaults(
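
Note: a hedged usage sketch of the updated `create_custom_llm`; the prompt string and override values below are placeholders, while the defaults mentioned in the comments are the new module-level constants from the diff above:

from autollm.serve.llm_utils import create_custom_llm

# Without a config, the new `if not config: config = {}` guard applies, so the
# call falls back to DEFAULT_LLM_MODEL, DEFAULT_LLM_MAX_TOKENS and
# DEFAULT_LLM_TEMPERATURE instead of failing on config.get(...) with config=None.
custom_llm = create_custom_llm(
    user_prompt="A bot that answers questions about my product docs")

# A partial config overrides only the keys it provides; the rest keep the
# module-level defaults. The model name here is a placeholder.
custom_llm = create_custom_llm(
    user_prompt="A bot that answers questions about my product docs",
    config={"llm_model": "gpt-3.5-turbo", "llm_temperature": 0.2},
)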

0 commit comments