Skip to content

Commit 4cef7c6

Browse files
[Issue-1961]: Handle clippy::unnecessary_wraps (tensorzero#2856)
1 parent 788a4bf commit 4cef7c6

File tree

7 files changed

+19
-10
lines changed

7 files changed

+19
-10
lines changed

Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@ todo = "deny"
 trivially_copy_pass_by_ref = "deny"
 unimplemented = "deny"
 uninlined_format_args = "deny"
+unnecessary_wraps = "deny"
 unreachable = "deny"
 unused_self = "deny"
 unwrap_used = "deny"

clients/python/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -587,6 +587,7 @@ impl TensorZeroGateway {
     }

     // TODO - implement closing the 'reqwest' connection pool: https://github.com/tensorzero/tensorzero/issues/857
+    #[expect(clippy::unnecessary_wraps)]
     fn __exit__(
         _this: Py<Self>,
         _exc_type: Py<PyAny>,

tensorzero-core/src/evaluations/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -776,6 +776,7 @@ impl UninitializedLLMJudgeVariantInfo {
 /// We want to make sure that there is an UninitializedLLMJudgeVariantConfig for each VariantConfig.
 /// This function should complain at compile time if we forget to update it when adding a new variant type.
 #[expect(dead_code)]
+#[expect(clippy::unnecessary_wraps)]
 fn check_convert_variant_to_llm_judge_variant(
     variant: VariantConfig,
 ) -> Result<UninitializedLLMJudgeVariantConfig, Error> {

tensorzero-core/src/providers/anthropic.rs

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ use crate::model::{
     build_creds_caching_default, fully_qualified_name, Credential, CredentialLocation,
     ModelProvider,
 };
+use crate::providers;
 use crate::providers::helpers::{
     inject_extra_request_data_and_send, inject_extra_request_data_and_send_eventsource,
 };
@@ -664,7 +665,7 @@ impl<'a> AnthropicRequestBody<'a> {
             .iter()
             .map(AnthropicMessage::try_from)
             .collect::<Result<Vec<_>, _>>()?;
-        let messages = prepare_messages(request_messages)?;
+        let messages = prepare_messages(request_messages);
         let messages = if matches!(
             request.json_mode,
             ModelInferenceRequestJsonMode::On | ModelInferenceRequestJsonMode::Strict
@@ -711,7 +712,9 @@ impl<'a> AnthropicRequestBody<'a> {
 /// Modifies the message array to satisfy Anthropic API requirements by:
 /// - Prepending a default User message with "[listening]" if the first message is not from a User
 /// - Appending a default User message with "[listening]" if the last message is from an Assistant
-fn prepare_messages(mut messages: Vec<AnthropicMessage>) -> Result<Vec<AnthropicMessage>, Error> {
+fn prepare_messages(
+    mut messages: Vec<AnthropicMessage>,
+) -> std::vec::Vec<providers::anthropic::AnthropicMessage<'_>> {
     // Anthropic also requires that there is at least one message and it is a User message.
     // If it's not we will prepend a default User message.
     match messages.first() {
@@ -745,7 +748,7 @@ fn prepare_messages(mut messages: Vec<AnthropicMessage>) -> std::vec::Vec<providers::anthropic::AnthropicMessage<'_>> {
             });
         }
     }
-    Ok(messages)
+    messages
 }

 fn prefill_json_message(messages: Vec<AnthropicMessage>) -> Vec<AnthropicMessage> {
@@ -1765,7 +1768,7 @@ mod tests {

         // Test case 1: Empty messages - should add listening message
         let messages = vec![];
-        let result = prepare_messages(messages).unwrap();
+        let result = prepare_messages(messages);
         assert_eq!(result, vec![listening_message.clone()]);

         // Test case 2: First message is Assistant - should prepend listening message
@@ -1783,7 +1786,7 @@ mod tests {
                 })],
             },
         ];
-        let result = prepare_messages(messages).unwrap();
+        let result = prepare_messages(messages);
         assert_eq!(
             result,
             vec![
@@ -1818,7 +1821,7 @@ mod tests {
                 })],
             },
         ];
-        let result = prepare_messages(messages).unwrap();
+        let result = prepare_messages(messages);
         assert_eq!(
             result,
             vec![
@@ -1859,7 +1862,7 @@ mod tests {
                 })],
             },
         ];
-        let result = prepare_messages(messages.clone()).unwrap();
+        let result = prepare_messages(messages.clone());
         assert_eq!(result, messages);

         // Test case 5: Both first Assistant and last Assistant - should add listening messages at both ends
@@ -1883,7 +1886,7 @@ mod tests {
                 })],
             },
         ];
-        let result = prepare_messages(messages).unwrap();
+        let result = prepare_messages(messages);
         assert_eq!(
             result,
             vec![
@@ -1917,7 +1920,7 @@ mod tests {
                 text: "Hi",
             })],
         }];
-        let result = prepare_messages(messages).unwrap();
+        let result = prepare_messages(messages);
         assert_eq!(
             result,
             vec![
@@ -1939,7 +1942,7 @@ mod tests {
                 text: "Hello",
             })],
         }];
-        let result = prepare_messages(messages.clone()).unwrap();
+        let result = prepare_messages(messages.clone());
         assert_eq!(result, messages);
     }

tensorzero-core/src/providers/aws_bedrock.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -779,6 +779,7 @@ struct ConverseOutputWithMetadata<'a> {
     json_mode: &'a ModelInferenceRequestJsonMode,
 }

+#[expect(clippy::unnecessary_wraps)]
 fn aws_stop_reason_to_tensorzero_finish_reason(stop_reason: StopReason) -> Option<FinishReason> {
     match stop_reason {
         StopReason::ContentFiltered => Some(FinishReason::ContentFilter),

tensorzero-core/src/providers/gcp_vertex_gemini/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2093,6 +2093,7 @@ pub fn tensorzero_to_gcp_vertex_gemini_content<'a>(
     Ok(message)
 }

+#[expect(clippy::unnecessary_wraps)]
 pub(crate) fn process_output_schema(output_schema: &Value) -> Result<Value, Error> {
     let mut schema = output_schema.clone();

tensorzero-core/src/variant/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -571,6 +571,7 @@ impl Variant for VariantInfo {
 }

 #[expect(clippy::too_many_arguments)]
+#[expect(clippy::unnecessary_wraps)]
 fn prepare_model_inference_request<'a, 'request>(
     messages: Vec<RequestMessage>,
     system: Option<String>,

0 commit comments

Comments (0)