Skip to content

Commit 68880c1

Browse files
Authored commit 68880c1 — Merge pull request #448 from sudoleg/feat/gpt-5: add GPT-5 models.
2 parents: a85a0ab + e3895d5

File tree

6 files changed

+19
-10
lines changed

6 files changed

+19
-10
lines changed

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@ YouTubeGPT lets you **summarize and chat (Q&A)** with YouTube videos. Its featur
2323

2424
### :robot: Choose from different OpenAI models
2525

26-
- currently available: gpt-3.5-turbo, gpt-4 (turbo), gpt-4o (mini)
27-
- by choosing a different model, you can summarize even longer videos and potentially get better responses
26+
- currently available: GPT-4 and GPT-5 models (incl. nano & mini variants), *continuously updated* with new models
27+
- by choosing a different model, you can summarize even longer videos and get better responses
2828

2929
### :gear: Experiment with settings
3030

config.json

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
},
88
"default_model": {
99
"embeddings": "text-embedding-3-small",
10-
"gpt": "gpt-4o-mini"
10+
"gpt": "gpt-4.1-nano"
1111
},
1212
"available_models": {
1313
"embeddings": [
@@ -17,6 +17,11 @@
1717
],
1818
"gpts": [
1919
"gpt-4.1-nano",
20+
"gpt-5.1-chat-latest",
21+
"gpt-5.1",
22+
"gpt-5-nano",
23+
"gpt-5-mini",
24+
"gpt-5",
2025
"gpt-4.1-mini",
2126
"gpt-4.1",
2227
"gpt-4o-mini",

modules/summary.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import logging
22

3-
from langchain.chat_models import BaseChatModel
43
from langchain.messages import HumanMessage, SystemMessage
4+
from langchain_openai import ChatOpenAI
55

66
from .helpers import num_tokens_from_string, read_file
77

@@ -19,6 +19,11 @@
1919
"gpt-4.1-nano": {"total": 1047576, "output": 32768},
2020
"gpt-4.1-mini": {"total": 1047576, "output": 32768},
2121
"gpt-4.1": {"total": 1047576, "output": 32768},
22+
"gpt-5.1-chat-latest": {"total": 400000, "output": 128000},
23+
"gpt-5.1": {"total": 400000, "output": 128000},
24+
"gpt-5-nano": {"total": 400000, "output": 128000},
25+
"gpt-5-mini": {"total": 400000, "output": 128000},
26+
"gpt-5": {"total": 400000, "output": 128000},
2227
}
2328

2429

@@ -35,13 +40,13 @@ def log_error(self):
3540
logging.error("Transcript too long for %s.", self.model_name, exc_info=True)
3641

3742

38-
def get_transcript_summary(transcript_text: str, llm: BaseChatModel, **kwargs):
43+
def get_transcript_summary(transcript_text: str, llm: ChatOpenAI, **kwargs):
3944
"""
4045
Generates a summary from a video transcript using a language model.
4146
4247
Args:
4348
transcript_text (str): The full transcript text of the video.
44-
llm (BaseChatModel): The language model instance to use for generating the summary.
49+
llm (ChatOpenAI): The language model instance to use for generating the summary.
4550
**kwargs: Optional keyword arguments.
4651
- custom_prompt (str): A custom prompt to replace the default summary request.
4752

modules/ui.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ def display_model_settings_sidebar():
9595
)
9696
if model != get_default_config_value("default_model.gpt"):
9797
st.warning(
98-
""":warning: Be aware of the higher costs and potentially higher latencies when using more advanced models (like gpt-4 and gpt-4o). You can see details (incl. costs) about the models and compare them [here](https://platform.openai.com/docs/models/compare)."""
98+
""":warning: Be aware of the higher costs and latencies when using more advanced (reasoning) models (like gpt-5). You can see details (incl. costs) about the models and compare them [here](https://platform.openai.com/docs/models/compare)."""
9999
)
100100

101101

pages/chat.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ def save_response_to_lib():
148148
temperature=st.session_state.temperature,
149149
model=st.session_state.model,
150150
top_p=st.session_state.top_p,
151-
max_tokens=2048,
151+
# max_tokens=2048,
152152
)
153153
openai_embedding_model = OpenAIEmbeddings(
154154
api_key=st.session_state.openai_api_key,

pages/summary.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -116,8 +116,7 @@ def save_summary_to_lib():
116116
temperature=st.session_state.temperature,
117117
model=st.session_state.model,
118118
top_p=st.session_state.top_p,
119-
max_tokens=2048,
120-
use_responses_api=False,
119+
# max_completion_tokens=4096,
121120
)
122121
with st.spinner("Summarizing video :gear: Hang on..."):
123122
if custom_prompt:

0 commit comments

Comments (0)