Skip to content

Commit 9781eb5

Browse files
committed
fix based on clippy
1 parent 12c997f commit 9781eb5

File tree

2 files changed: +10 −10 lines

async-openai/src/client.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -381,7 +381,7 @@ impl<C: Config> Client<C> {
         // Convert response body to EventSource stream
         let stream = response
             .bytes_stream()
-            .map(|result| result.map_err(|e| std::io::Error::other(e)));
+            .map(|result| result.map_err(std::io::Error::other));
         let event_stream = eventsource_stream::EventStream::new(stream);

         // Convert EventSource stream to our expected format
```
(clippy `redundant_closure`: the closure `|e| std::io::Error::other(e)` is replaced by the function path itself.)

async-openai/src/types/responses/response.rs

Lines changed: 9 additions & 9 deletions
```diff
@@ -760,10 +760,10 @@ pub struct CreateResponse {

     ///The truncation strategy to use for the model response.
     /// - `auto`: If the input to this Response exceeds
-    /// the model's context window size, the model will truncate the
-    /// response to fit the context window by dropping items from the beginning of the conversation.
+    ///   the model's context window size, the model will truncate the
+    ///   response to fit the context window by dropping items from the beginning of the conversation.
     /// - `disabled` (default): If the input size will exceed the context window
-    /// size for a model, the request will fail with a 400 error.
+    ///   size for a model, the request will fail with a 400 error.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub truncation: Option<Truncation>,
 }
```
(NOTE: the scrape dropped leading whitespace, making the removed and added lines look identical; given the commit message this is clippy's `doc_lazy_continuation` fix, which indents doc-comment continuation lines of a list item — reconstruction inferred, verify against the repository.)
```diff
@@ -2333,10 +2333,10 @@ pub struct Response {

     ///The truncation strategy to use for the model response.
     /// - `auto`: If the input to this Response exceeds
-    /// the model's context window size, the model will truncate the
-    /// response to fit the context window by dropping items from the beginning of the conversation.
+    ///   the model's context window size, the model will truncate the
+    ///   response to fit the context window by dropping items from the beginning of the conversation.
     /// - `disabled` (default): If the input size will exceed the context window
-    /// size for a model, the request will fail with a 400 error.
+    ///   size for a model, the request will fail with a 400 error.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub truncation: Option<Truncation>,

```
(Same `doc_lazy_continuation` indentation fix as above; whitespace reconstruction inferred.)
```diff
@@ -2539,10 +2539,10 @@ pub struct TokenCountsBody {

     ///The truncation strategy to use for the model response.
     /// - `auto`: If the input to this Response exceeds
-    /// the model's context window size, the model will truncate the
-    /// response to fit the context window by dropping items from the beginning of the conversation.
+    ///   the model's context window size, the model will truncate the
+    ///   response to fit the context window by dropping items from the beginning of the conversation.
     /// - `disabled` (default): If the input size will exceed the context window
-    /// size for a model, the request will fail with a 400 error.
+    ///   size for a model, the request will fail with a 400 error.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub truncation: Option<Truncation>,
 }
```
(Same `doc_lazy_continuation` indentation fix as above; whitespace reconstruction inferred.)

0 commit comments

Comments
 (0)