Skip to content

Commit f1d1ff7

Browse files
committed
refactor: streamline main and OpenAI modules
- extract logging initialization and model checking into dedicated functions
- improve error handling by replacing match statements with ? operator
- simplify variable assignments and code flow for better readability

Signed-off-by: mingcheng <[email protected]>
1 parent aa13e1f commit f1d1ff7

File tree

2 files changed

+140
-118
lines changed

2 files changed

+140
-118
lines changed

src/main.rs

Lines changed: 73 additions & 62 deletions
Original file line number | Diff line number | Diff line change
@@ -23,44 +23,40 @@ use async_openai::types::{
2323
ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs,
2424
};
2525
use clap::Parser;
26-
use std::error::Error;
2726
use std::fs;
2827
use std::io::Write;
28+
use std::path::Path;
2929
use tracing::{Level, debug, error, info, trace};
3030

3131
use aigitcommit::utils::{
3232
OutputFormat, check_env_variables, env, format_openai_error, save_to_file, should_signoff,
3333
};
3434

35+
// Constants for better performance and maintainability
36+
const DEFAULT_MODEL: &str = "gpt-5";
37+
const DEFAULT_LOG_COUNT: usize = 5;
38+
const SYSTEM_PROMPT: &str = include_str!("../templates/system.txt");
39+
40+
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
41+
3542
#[tokio::main]
36-
async fn main() -> std::result::Result<(), Box<dyn Error>> {
43+
async fn main() -> Result<()> {
3744
// Parse command line arguments
3845
let cli = Cli::parse();
3946

4047
// Initialize logging
41-
if cli.verbose {
42-
tracing_subscriber::fmt()
43-
.with_max_level(Level::TRACE)
44-
.without_time()
45-
.with_target(false)
46-
.init();
48+
init_logging(cli.verbose);
4749

48-
trace!(
49-
"verbose mode enabled, set the log level to TRACE. It will make a little bit of noise."
50-
);
51-
}
52-
53-
// Get the specified model name from environment variable, default to "gpt-5"
54-
let model_name = env::get("OPENAI_MODEL_NAME", "gpt-5");
50+
// Get the specified model name from environment variable, default constant
51+
let model_name = env::get("OPENAI_MODEL_NAME", DEFAULT_MODEL);
5552

5653
// Instantiate OpenAI client, ready to send requests to the OpenAI API
57-
let client = openai::OpenAI::new();
54+
let client = OpenAI::new();
5855

5956
// Check if the environment variables are set and print the configured values
6057
if cli.check_env {
6158
trace!("check env option is enabled");
6259
debug!("model name: `{}`", &model_name);
63-
6460
check_env_variables();
6561
return Ok(());
6662
}
@@ -69,58 +65,51 @@ async fn main() -> std::result::Result<(), Box<dyn Error>> {
6965
if cli.check_model {
7066
trace!("check model option is enabled");
7167
debug!("model name: `{}`", &model_name);
72-
73-
match client.check_model(&model_name).await {
74-
Ok(()) => {
75-
println!(
76-
"the model name `{}` is available, {} is ready for use!",
77-
model_name, PKG_NAME
78-
);
79-
}
80-
Err(e) => {
81-
return Err(format!("the model name `{model_name}` is not available: {e}").into());
82-
}
83-
}
84-
68+
check_model_availability(&client, &model_name).await?;
8569
return Ok(());
8670
}
8771

8872
// Initialize repository
89-
let repo_dir = fs::canonicalize(&cli.repo_path)?;
73+
let repo_path = Path::new(&cli.repo_path);
74+
let repo_dir = fs::canonicalize(repo_path)
75+
.map_err(|e| format!("failed to resolve repository path: {e}"))?;
76+
9077
if !repo_dir.is_dir() {
9178
return Err("the specified path is not a directory".into());
9279
}
9380

9481
trace!("specified repository directory: {:?}", repo_dir);
95-
let repository = Repository::new(repo_dir.to_str().unwrap_or("."))?;
82+
let repository = Repository::new(
83+
repo_dir
84+
.to_str()
85+
.ok_or("invalid UTF-8 in repository path")?,
86+
)?;
9687

9788
// Get the diff and logs from the repository
9889
let diffs = repository.get_diff()?;
9990
debug!("got diff size is {}", diffs.len());
91+
10092
if diffs.is_empty() {
101-
return Err("no diff found".into());
93+
return Err("no changes found in the repository".into());
10294
}
10395

104-
// Get the last 5 commit logs
105-
// if the repository has less than 5 commits, it will return all logs
106-
let logs = repository.get_logs(5)?;
96+
// Get the last N commit logs
97+
// if the repository has less than N commits, it will return all logs
98+
let logs = repository.get_logs(DEFAULT_LOG_COUNT)?;
10799
debug!("got logs size is {}", logs.len());
108100

109101
// If git commit log is empty, return error
110102
if logs.is_empty() {
111-
return Err("no commit logs found".into());
103+
return Err("no commit history found in the repository".into());
112104
}
113105

114106
// Generate the prompt which will be sent to OpenAI API
115107
let content = OpenAI::prompt(&logs, &diffs)?;
116108

117-
// Load the system prompt from the template file
118-
let system_prompt = include_str!("../templates/system.txt");
119-
120-
// The request contains the system message and user message
109+
// Build the chat completion request messages
121110
let messages = vec![
122111
ChatCompletionRequestSystemMessageArgs::default()
123-
.content(system_prompt)
112+
.content(SYSTEM_PROMPT)
124113
.build()?
125114
.into(),
126115
ChatCompletionRequestUserMessageArgs::default()
@@ -130,13 +119,10 @@ async fn main() -> std::result::Result<(), Box<dyn Error>> {
130119
];
131120

132121
// Send the request to OpenAI API and get the response
133-
let result = match client.chat(&model_name, messages).await {
134-
Ok(s) => s,
135-
Err(e) => {
136-
let message = format_openai_error(e);
137-
return Err(message.into());
138-
}
139-
};
122+
let result = client
123+
.chat(&model_name, messages)
124+
.await
125+
.map_err(|e| format_openai_error(e))?;
140126

141127
let (title, content) = result
142128
.split_once("\n\n")
@@ -145,16 +131,18 @@ async fn main() -> std::result::Result<(), Box<dyn Error>> {
145131
// Detect auto signoff from environment variable or CLI flag
146132
let need_signoff = should_signoff(&repository, cli.signoff);
147133

148-
let message: GitMessage = GitMessage::new(&repository, title, content, need_signoff)?;
134+
let message = GitMessage::new(&repository, title, content, need_signoff)?;
149135

150136
// Decide the output format based on the command line arguments
151137
let output_format = OutputFormat::detect(cli.json, cli.no_table);
152138
output_format.write(&message)?;
153139

154140
// Copy the commit message to clipboard if the --copy option is enabled
155141
if cli.copy_to_clipboard {
156-
let mut clipboard = Clipboard::new()?;
157-
clipboard.set_text(message.to_string())?;
142+
let mut clipboard = Clipboard::new()
143+
.map_err(|e| format!("failed to initialize clipboard: {e}"))?;
144+
clipboard.set_text(message.to_string())
145+
.map_err(|e| format!("failed to copy to clipboard: {e}"))?;
158146
writeln!(
159147
std::io::stdout(),
160148
"the commit message has been copied to clipboard."
@@ -165,10 +153,12 @@ async fn main() -> std::result::Result<(), Box<dyn Error>> {
165153
if cli.commit {
166154
trace!("commit option is enabled, will commit the changes directly to the repository");
167155

168-
if cli.yes || {
156+
let should_commit = cli.yes || {
169157
cliclack::intro(format!("{PKG_NAME} v{PKG_VERSION}"))?;
170158
cliclack::confirm("Are you sure to commit with generated message below?").interact()?
171-
} {
159+
};
160+
161+
if should_commit {
172162
match repository.commit(&message) {
173163
Ok(oid) => {
174164
cliclack::note("Commit successful, last commit ID:", oid)?;
@@ -187,15 +177,36 @@ async fn main() -> std::result::Result<(), Box<dyn Error>> {
187177
trace!("save option is enabled, will save the commit message to a file");
188178

189179
// Save the commit message to the specified file
190-
match save_to_file(&cli.save, &message) {
191-
Ok(f) => {
192-
info!("commit message saved to file: {:?}", f);
193-
}
194-
Err(e) => {
195-
error!("failed to save commit message to file: {}", e);
196-
}
197-
}
180+
save_to_file(&cli.save, &message)
181+
.map(|f| info!("commit message saved to file: {:?}", f))
182+
.unwrap_or_else(|e| error!("failed to save commit message to file: {}", e));
183+
}
184+
185+
Ok(())
186+
}
187+
188+
/// Initialize logging based on verbosity level
189+
#[inline]
190+
fn init_logging(verbose: bool) {
191+
if verbose {
192+
tracing_subscriber::fmt()
193+
.with_max_level(Level::TRACE)
194+
.without_time()
195+
.with_target(false)
196+
.init();
197+
198+
trace!(
199+
"verbose mode enabled, set the log level to TRACE. It will make a little bit of noise."
200+
);
198201
}
202+
}
199203

204+
/// Check if the model is available
205+
async fn check_model_availability(client: &OpenAI, model_name: &str) -> Result<()> {
206+
client.check_model(model_name).await?;
207+
println!(
208+
"the model name `{}` is available, {} is ready for use!",
209+
model_name, PKG_NAME
210+
);
200211
Ok(())
201212
}

0 commit comments

Comments
 (0)