Skip to content

Commit c32ee0b

Browse files
committed
feat: Update Huginn Hears UI and add options
Allow the user to modify some of the settings for the model and set their own prompt template.
1 parent 683c2a9 commit c32ee0b

File tree

2 files changed

+47
-7
lines changed

2 files changed

+47
-7
lines changed

huginn_hears/main.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -217,6 +217,7 @@ def load_model(self):
217217
self.model = CustomLlamaCpp(
218218
repo_id=self.repo_id,
219219
filename=self.filename,
220+
n_gpu_layers=self.layers,
220221
**self.model_options,
221222
)
222223
try:

streamlit_app/app.py

Lines changed: 46 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,12 @@ def get_file_path(audio_file):
5656

5757

5858
def main():
59-
st.title("Huginn Hears - Audio Summarization")
59+
st.title("Huginn Hears - Audio Summarization 🐦‍⬛")
6060

61+
st.write("""Transcribe and summarize audio files using Huginn Hears on your local machine.
62+
Huginn Hears is a tool that uses NB-Whisper for transcription and any LLM from Hugging Face you want to summarize the meeting.
63+
It is optimized for Norwegian and summarizing meetings.""")
64+
6165
# Notice about the initial download time for models
6266
st.info("Please note: The first run of Huginn Hears may be slower as it downloads all necessary models. This is a one-time process, and subsequent runs will be significantly faster.")
6367
# Get repo id and filename for the Hugging Face model
@@ -69,15 +73,49 @@ def main():
6973
if not mistral_model_path or not mistral_filename:
7074
st.warning("Please enter the Hugging Face repo id and the filename of the model.")
7175
st.stop()
76+
7277
else:
78+
col1, col2 = st.columns(2)
7379
# Select prompt and refine templates
74-
prompt_template = st.selectbox("Select prompt template", list(prompt_templates.keys()))
75-
refine_template = st.selectbox("Select refine template", list(refine_templates.keys()))
80+
with col1:
81+
prompt_template = st.selectbox("Select prompt template", list(prompt_templates.keys()))
82+
with col2:
83+
refine_template = st.selectbox("Select refine template", list(refine_templates.keys()))
7684

7785
# Use the selected templates
7886
selected_prompt_template = prompt_templates[prompt_template]
7987
selected_refine_template = refine_templates[refine_template]
8088

89+
# Advanced options
90+
with st.expander("**Advanced Options** 🛠️", expanded=False):
91+
# Adjust model parameters
92+
col1, col2, col3, col4 = st.columns(4)
93+
with col1:
94+
n_ctx = st.slider("Context Size", min_value=0, max_value=8192, value=4096, step=256)
95+
with col2:
96+
max_tokens = st.slider("Max Tokens (Output)", min_value=0, max_value=1024, value=512, step=128)
97+
with col3:
98+
n_batch = st.slider("Batch Size", min_value=0, max_value=512, value=16, step=4)
99+
with col4:
100+
n_threads = st.slider("Threads", min_value=0, max_value=16, value=4, step=1)
101+
102+
model_options = {
103+
"n_ctx": n_ctx,
104+
"max_tokens": max_tokens,
105+
"n_batch": n_batch,
106+
"n_threads": n_threads
107+
}
108+
109+
# Custom prompt and refine templates
110+
st.info("You can write your own prompt and refine templates. NB: Make sure to include the placeholders `{text}` and `{existing_answer}` in the templates.")
111+
custom_prompt = st.text_area("Custom Prompt Template", placeholder="Summarize this text: {text}")
112+
custom_refine = st.text_area("Custom Refine Template", placeholder="Refine the summary: {existing_answer} with additional text: {text}")
113+
if custom_prompt:
114+
selected_prompt_template = custom_prompt
115+
if custom_refine:
116+
selected_refine_template = custom_refine
117+
118+
81119
uploaded_file = st.file_uploader(
82120
"Choose an audio file", type=['wav', 'mp3', 'm4a'])
83121

@@ -90,9 +128,10 @@ def main():
90128
# Initialize the transcriber and summarizer
91129
transcriber = WhisperTranscriber()
92130
extractive_summarizer = ExtractiveSummarizer()
93-
summarizer = LLMSummarizer(repo_id=mistral_model_path, filename=mistral_filename,
131+
summarizer = LLMSummarizer(repo_id=mistral_model_path,
132+
filename=mistral_filename,
94133
prompt_template=selected_prompt_template,
95-
refine_template=selected_refine_template)
134+
refine_template=selected_refine_template, model_options=model_options)
96135

97136
# Cache the transcribe and summarize functions to avoid re-running them
98137
@st.cache_data(persist=True, show_spinner="Transcribing audio...")
@@ -129,7 +168,7 @@ def cached_summarize(transcript):
129168
gc.collect()
130169
summary = cached_summarize(summary_ex)
131170
# Display summary
132-
st.write("Summary:")
171+
st.write("**Summary**:")
133172
st.write(summary)
134173
# Download summary
135174
st.download_button(label="Download Summary", data=summary,
@@ -144,7 +183,7 @@ def cached_summarize(transcript):
144183
transcript = st.session_state.transcript
145184
summary = cached_summarize(transcript)
146185
# Display summary
147-
st.write("Summary:")
186+
st.write("**Summary**:")
148187
st.write(summary)
149188
# Download summary
150189
st.download_button(label="Download Summary", data=summary,

0 commit comments

Comments
 (0)