
Commit 797ec15
refactor: Refactor code for improved error handling and formatting
- Improve exception handling in summarizer.py
- Update dependencies in requirements.txt
- Refactor cli.py for better import organization
- Reformat pyproject.toml for better readability
- Enhance readability of config_manager.py
- Refactor interval calculation in splitting.py
1 parent 9ee2c28 commit 797ec15

6 files changed, +79 -68 lines changed

pyproject.toml

Lines changed: 29 additions & 16 deletions
@@ -9,20 +9,20 @@ description = "A CLI tool to transcribe audio files using OpenAI API"
 readme = "README.md"
 requires-python = ">=3.12"
 classifiers = [
-  "Programming Language :: Python :: 3",
-  "License :: OSI Approved :: MIT License",
-  "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
 ]
 dependencies = [
-  "pydub",
-  "tqdm",
-  "pyyaml",
-  "colorama",
-  "halo",
-  "yamale",
-  "tenacity",
-  "openai",
-  "anthropic",
+    "pydub",
+    "tqdm",
+    "pyyaml",
+    "colorama",
+    "halo",
+    "yamale",
+    "tenacity",
+    "openai",
+    "anthropic",
 ]

 [project.urls]
@@ -31,14 +31,27 @@ dependencies = [

 [tool.setuptools]
 packages = [
-  "transcribe_me",
-  "transcribe_me.config",
-  "transcribe_me.audio",
-  "transcribe_me.summarization",
+    "transcribe_me",
+    "transcribe_me.config",
+    "transcribe_me.audio",
+    "transcribe_me.summarization",
 ]

 [tool.setuptools.package-data]
 "transcribe_me.config" = ["*.yaml"]

 [tool.setuptools.dynamic]
 version = { attr = "transcribe_me.__version__" }
+
+[tool.flake8]
+ignore = ["E226", "E302", "E41"]
+max-line-length = 160
+exclude = ["tests/*"]
+max-complexity = 10
+
+[tool.autopep8]
+max_line_length = 120
+ignore = ["E226", "E302", "E41"]
+in-place = true
+recursive = true
+aggressive = 3
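One subtlety worth flagging: flake8 on its own ignores a `[tool.flake8]` table in pyproject.toml; the Flake8-pyproject plugin pinned in requirements.txt below is what makes it read this section. A minimal sketch of the same settings applied through flake8's documented legacy Python API, assuming both packages are installed:

```python
# Sketch only: applies the [tool.flake8] settings above through
# flake8's legacy API, bypassing config-file discovery entirely.
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide(
    ignore=["E226", "E302", "E41"],
    max_line_length=160,
)
report = style_guide.check_files(["transcribe_me"])
print(f"{report.total_errors} lint error(s) found")
```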

requirements.txt

Lines changed: 4 additions & 0 deletions
@@ -14,6 +14,8 @@ dill==0.3.8
 distro==1.9.0
 docutils==0.21.1
 filelock==3.13.3
+flake8==7.1.1
+Flake8-pyproject==1.2.3
 fsspec==2024.3.1
 h11==0.14.0
 halo==0.0.31
@@ -42,9 +44,11 @@ pathspec==0.12.1
 pkginfo==1.10.0
 platformdirs==4.2.0
 prompt-toolkit==3.0.36
+pycodestyle==2.12.1
 pydantic==2.6.4
 pydantic_core==2.16.3
 pydub==0.25.1
+pyflakes==3.2.0
 Pygments==2.17.2
 pylint==3.1.0
 pyproject_hooks==1.0.0
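pycodestyle and pyflakes are dependencies of flake8 itself, which is why their pins land alongside it. A standard-library-only sketch to confirm the four new pins resolve together:

```python
# Quick sanity check that the new lint pins are installed together;
# importlib.metadata normalizes distribution names, so case is fine.
from importlib.metadata import PackageNotFoundError, version

for pkg in ("flake8", "Flake8-pyproject", "pycodestyle", "pyflakes"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg} is not installed")
```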

transcribe_me/audio/splitting.py

Lines changed: 2 additions & 1 deletion
@@ -21,7 +21,8 @@ def split_audio(file_path: str, interval_minutes: int = 10) -> list[str]:
     audio = AudioSegment.from_mp3(file_path)

     interval_ms = interval_minutes * 60 * 1000
-    chunks = [audio[i : i + interval_ms] for i in range(0, len(audio), interval_ms)]
+    chunks = [audio[i: i + interval_ms]
+              for i in range(0, len(audio), interval_ms)]

     chunk_names = []
     spinner = Halo(text="Splitting audio", spinner="dots")
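The slicing works because pydub's AudioSegment measures length and supports slicing in milliseconds. A self-contained sketch of the same interval arithmetic, using a generated silent clip in place of an MP3 input:

```python
# Minimal sketch of the chunking arithmetic above (assumes pydub is
# installed; a silent clip stands in for the MP3 file).
from pydub import AudioSegment

interval_minutes = 10
interval_ms = interval_minutes * 60 * 1000  # 600,000 ms per chunk

audio = AudioSegment.silent(duration=25 * 60 * 1000)  # 25-minute clip
chunks = [audio[i: i + interval_ms]
          for i in range(0, len(audio), interval_ms)]

# len() on an AudioSegment is its duration in milliseconds, so a
# 25-minute clip yields three chunks of 10, 10, and 5 minutes.
print([len(chunk) // 60_000 for chunk in chunks])  # [10, 10, 5]
```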

transcribe_me/cli.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 import argparse
 from transcribe_me.config import config_manager
-from transcribe_me.audio import transcription, splitting
+from transcribe_me.audio import transcription
 from transcribe_me.summarization import summarizer

transcribe_me/config/config_manager.py

Lines changed: 35 additions & 48 deletions
@@ -62,54 +62,41 @@ def install_config() -> None:
         f"{Fore.YELLOW}This will create a configuration file and input/output folders in the current directory."
     )

-    config = {
-        "openai": {
-            "models": [
-                {
-                    "temperature": 0.1,
-                    "max_tokens": 2048,
-                    "model": "gpt-4",
-                    "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
-                },
-                {
-                    "temperature": 0.3,
-                    "max_tokens": 2048,
-                    "model": "gpt-4",
-                    "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
-                },
-                {
-                    "temperature": 0.5,
-                    "max_tokens": 2048,
-                    "model": "gpt-4",
-                    "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
-                },
-            ]
-        },
-        "anthropic": {
-            "models": [
-                {
-                    "temperature": 0.1,
-                    "max_tokens": 2048,
-                    "model": "claude-3-sonnet-20240229",
-                    "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
-                },
-                {
-                    "temperature": 0.3,
-                    "max_tokens": 2048,
-                    "model": "claude-3-sonnet-20240229",
-                    "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
-                },
-                {
-                    "temperature": 0.5,
-                    "max_tokens": 2048,
-                    "model": "claude-3-sonnet-20240229",
-                    "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
-                },
-            ]
-        },
-        "input_folder": DEFAULT_INPUT_FOLDER,
-        "output_folder": DEFAULT_OUTPUT_FOLDER,
-    }
+    config = {"openai": {"models": [{"temperature": 0.1,
+                                     "max_tokens": 2048,
+                                     "model": "gpt-4",
+                                     "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
+                                     },
+                                    {"temperature": 0.3,
+                                     "max_tokens": 2048,
+                                     "model": "gpt-4",
+                                     "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
+                                     },
+                                    {"temperature": 0.5,
+                                     "max_tokens": 2048,
+                                     "model": "gpt-4",
+                                     "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
+                                     },
+                                    ]},
+              "anthropic": {"models": [{"temperature": 0.1,
+                                        "max_tokens": 2048,
+                                        "model": "claude-3-sonnet-20240229",
+                                        "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
+                                        },
+                                       {"temperature": 0.3,
+                                        "max_tokens": 2048,
+                                        "model": "claude-3-sonnet-20240229",
+                                        "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
+                                        },
+                                       {"temperature": 0.5,
+                                        "max_tokens": 2048,
+                                        "model": "claude-3-sonnet-20240229",
+                                        "system_prompt": "Generate a summary with key points in bold and a Next Steps section, use Markdown, be a concise tech expert but kind to non-technical readers.",
+                                        },
+                                       ]},
+              "input_folder": DEFAULT_INPUT_FOLDER,
+              "output_folder": DEFAULT_OUTPUT_FOLDER,
+              }

     if not OPENAI_API_KEY:
         print(
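The autopep8 layout above is faithful to the commit, though the three entries per platform still differ only in temperature. A hypothetical refactor sketch, not part of this commit, that would generate them instead (the DEFAULT_* values here are placeholders; the real constants live elsewhere in config_manager.py):

```python
# Hypothetical refactor sketch, NOT part of this commit.
DEFAULT_INPUT_FOLDER = "input"    # placeholder value
DEFAULT_OUTPUT_FOLDER = "output"  # placeholder value

SYSTEM_PROMPT = (
    "Generate a summary with key points in bold and a Next Steps section, "
    "use Markdown, be a concise tech expert but kind to non-technical readers."
)


def make_models(model_name: str) -> list[dict]:
    """Build one model config per sampling temperature."""
    return [
        {
            "temperature": temperature,
            "max_tokens": 2048,
            "model": model_name,
            "system_prompt": SYSTEM_PROMPT,
        }
        for temperature in (0.1, 0.3, 0.5)
    ]


config = {
    "openai": {"models": make_models("gpt-4")},
    "anthropic": {"models": make_models("claude-3-sonnet-20240229")},
    "input_folder": DEFAULT_INPUT_FOLDER,
    "output_folder": DEFAULT_OUTPUT_FOLDER,
}
```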

transcribe_me/summarization/summarizer.py

Lines changed: 8 additions & 2 deletions
@@ -41,8 +41,11 @@ def generate_summary(
             summary = openai_response.choices[0].message.content.strip()
         except openai.error.RateLimitError as e:
             print(f"{Fore.YELLOW}Rate limit reached, retrying in a bit...")
-        except e:
             print(f"{Fore.RED}Error: {e}")
+            raise
+        except Exception as e:
+            print(f"{Fore.RED}Error: {e}")
+            raise
     elif "anthropic" in platform:
         anthropic_client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
         try:
@@ -56,8 +59,11 @@ def generate_summary(
             summary = anthropic_response.content[0].text
         except anthropic.error.RateLimitError as e:
             print(f"{Fore.YELLOW}Rate limit reached, retrying in a bit...")
-        except e:
             print(f"{Fore.RED}Error: {e}")
+            raise
+        except Exception as e:
+            print(f"{Fore.RED}Error: {e}")
+            raise
     return summary

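The old `except e:` was a latent NameError: Python expects an exception class after `except`, and the name `e` was unbound, so any non-rate-limit failure would itself crash. A minimal sketch of the corrected pattern, assuming the project's tenacity dependency drives the retries; note that openai>=1.0 exposes the exception as openai.RateLimitError, while openai.error.RateLimitError (as written in the diff) only exists in the pre-1.0 SDK:

```python
# Minimal sketch of the corrected pattern (assumes openai>=1.0 and
# tenacity, both already in this project's dependency list).
import openai
from tenacity import retry, stop_after_attempt, wait_exponential


@retry(stop=stop_after_attempt(3), wait=wait_exponential(min=1, max=30))
def summarize(client: openai.OpenAI, transcript: str) -> str:
    try:
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": transcript}],
        )
        return response.choices[0].message.content.strip()
    except openai.RateLimitError:
        print("Rate limit reached, retrying in a bit...")
        raise  # re-raise so @retry can back off and try again
    except Exception as e:
        # Catch a named exception class and re-raise, instead of the
        # old `except e:`, which was a NameError at runtime.
        print(f"Error: {e}")
        raise
```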
